author     Dave Airlie <airlied@redhat.com>  2024-10-29 18:25:24 +1000
committer  Dave Airlie <airlied@redhat.com>  2024-10-29 18:25:24 +1000
commit     e7103f8785504dd5c6aad118fbc64fc49eda33af (patch)
tree       67d50004ab06337ca473520b6a104bb146899ae2
parent     c9ff14d0339a7838b71d9f196bd4244eeb6e2808 (diff)
parent     dac64cb3e029e9ae9ca251798bcb9cdb118d68d5 (diff)
Merge tag 'amd-drm-next-6.13-2024-10-25' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.13-2024-10-25:

amdgpu:
- SDMA queue reset support
- SMU 13.0.6 updates
- Add debugfs interface to help limit jpeg queue scheduling for testing
- JPEG 4.0.3 updates
- Initial runtime repartitioning support
- GFX9 fixes
- Misc code cleanups
- Rework IP structures to better handle multiple instances of an IP
- DML updates
- DSC fixes
- HDR fixes
- Brightness control updates
- Runtime pm cleanup
- DMCUB fixes
- DCN 3.5 updates
- Struct drm_edid cleanup
- Fetch EDID from _DDC if available
- Ring noop optimizations
- MES logging fixes
- 3DLUT fixes
- DCN 4.x fixes
- SMU 13.x fixes
- Fixes for set_soft_freq_range()
- ACPI fixes
- SMU 14.x updates
- PSR-SU fixes
- fdinfo cleanup
- DCN documentation updates

amdkfd:
- Misc code cleanups
- Increase event FIFO size
- Copy wave state fixes for SDMA

radeon:
- Fix possible overflow in packet3 check
- Late init connector fix
- Always set GEM function pointer

Documentation:
- Update drm-memory documentation

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241025132336.2416913-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  Documentation/gpu/amdgpu/display/dc-arch-overview.svg | 731
-rw-r--r--  Documentation/gpu/amdgpu/display/dc-components.svg | 732
-rw-r--r--  Documentation/gpu/amdgpu/display/dc-debug.rst | 187
-rw-r--r--  Documentation/gpu/amdgpu/display/dcn-blocks.rst | 2
-rw-r--r--  Documentation/gpu/amdgpu/display/dcn-overview.rst | 2
-rw-r--r--  Documentation/gpu/amdgpu/display/index.rst | 1
-rw-r--r--  Documentation/gpu/amdgpu/display/programming-model-dcn.rst | 162
-rw-r--r--  Documentation/gpu/drm-usage-stats.rst | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 507
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 214
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 150
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 243
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 136
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 388
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 417
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 321
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 311
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 308
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 305
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 255
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 285
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_debug.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 165
-rw-r--r--  drivers/gpu/drm/amd/display/include/dpcd_defs.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 43
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h | 23
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 103
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 335
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 48
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 50
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 53
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 428
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h | 561
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c | 574
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 74
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 132
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 433
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 1271
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 34
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 39
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 209
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 83
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
334 files changed, 9322 insertions, 8887 deletions
diff --git a/Documentation/gpu/amdgpu/display/dc-arch-overview.svg b/Documentation/gpu/amdgpu/display/dc-arch-overview.svg
new file mode 100644
index 000000000000..23394931cf26
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dc-arch-overview.svg
@@ -0,0 +1,731 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ width="1204.058"
+ height="510.57321"
+ viewBox="0 0 318.57366 135.08917"
+ version="1.1"
+ id="svg8"
+ inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
+ sodipodi:docname="dc-arch-overview.svg"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:dc="http://purl.org/dc/elements/1.1/">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616"
+ style="fill:#aa00d4;fill-opacity:1;fill-rule:evenodd;stroke:#aa00d4;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8622"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8592"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8610"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-8"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-5"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-1"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-4"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1-0"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616-5"
+ style="fill:#00ffcc;fill-opacity:1;fill-rule:evenodd;stroke:#00ffcc;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-56"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-9"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4"
+ inkscape:cx="812.5"
+ inkscape:cy="315"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="3840"
+ inkscape:window-height="2083"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ showguides="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ units="px"
+ inkscape:snap-global="false"
+ inkscape:showpageshadow="2"
+ inkscape:pagecheckerboard="0"
+ inkscape:deskcolor="#d1d1d1" />
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(399.57097,11.171866)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ x="-297.75696"
+ y="109.44505"
+ id="text1063" />
+ <path
+ style="fill:#008000;stroke:#008000;stroke-width:0.463298;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.463298, 0.926596;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -120.41395,84.001461 h -9.04766"
+ id="path1171-0-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#ff0000;stroke-width:0.982225;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.982225, 1.96445;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -129.96274,90.649221 h 8.66407"
+ id="path1171-7-1-3-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#3771c8;stroke-width:0.745037;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m -121.33167,97.283841 h -7.91265"
+ id="path7149-3-7-8"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ x="-115.55721"
+ y="85.330681"
+ id="text12079"><tspan
+ sodipodi:role="line"
+ id="tspan12077"
+ x="-115.55721"
+ y="85.330681"
+ style="font-size:4.80199px;stroke-width:0.163704">Board/Platform</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ x="-115.75885"
+ y="92.435066"
+ id="text12079-3"><tspan
+ sodipodi:role="line"
+ id="tspan12077-1"
+ x="-115.75885"
+ y="92.435066"
+ style="font-size:4.80199px;stroke-width:0.163704">SoC</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:6.54816px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ x="-115.6041"
+ y="98.608604"
+ id="text12079-3-4"><tspan
+ sodipodi:role="line"
+ id="tspan12077-1-9"
+ x="-115.6041"
+ y="98.608604"
+ style="font-size:4.80199px;stroke-width:0.163704">Component</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-368.54205"
+ y="92.633011"
+ id="text1010-5"><tspan
+ sodipodi:role="line"
+ x="-368.54205"
+ y="92.633011"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan1057">DRAM</tspan></text>
+ <g
+ id="g730"
+ transform="translate(6.9386906,-2.5203356)">
+ <text
+ id="text838-5-2-6-2"
+ y="32.372173"
+ x="-372.97867"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan936-1-2-3"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="32.372173"
+ x="-372.97867"
+ sodipodi:role="line">dc_plane</tspan></text>
+ <rect
+ ry="6.9139691e-07"
+ y="18.717371"
+ x="-390.50565"
+ height="23.904575"
+ width="35.080177"
+ id="rect834-5-2-6-75"
+ style="fill:none;stroke:#000000;stroke-width:0.561714;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <g
+ id="g738"
+ transform="translate(6.9386906,31.346346)">
+ <text
+ id="text734"
+ y="32.372173"
+ x="-372.97867"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan732"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="32.372173"
+ x="-372.97867"
+ sodipodi:role="line">dc_plane</tspan></text>
+ <rect
+ ry="6.9139691e-07"
+ y="18.717371"
+ x="-390.50565"
+ height="23.904575"
+ width="35.080177"
+ id="rect736"
+ style="fill:none;stroke:#000000;stroke-width:0.561714;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <rect
+ ry="2.1256196e-06"
+ y="8.5983658"
+ x="-389.18051"
+ height="73.491852"
+ width="46.307304"
+ id="rect744"
+ style="fill:none;stroke:#3771c8;stroke-width:1.13159;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <g
+ id="g757"
+ transform="translate(-19.949528,-8.6078171)">
+ <text
+ id="text600"
+ y="56.289795"
+ x="-256.91336"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan598"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="56.289795"
+ x="-256.91336"
+ sodipodi:role="line">DC</tspan></text>
+ <rect
+ ry="1.7458606e-06"
+ y="23.771139"
+ x="-289.21854"
+ height="60.361938"
+ width="65.042557"
+ id="rect602"
+ style="fill:none;stroke:#000000;stroke-width:1.21541;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <rect
+ ry="2.3633565e-06"
+ y="4.4885707"
+ x="-316.43292"
+ height="81.711441"
+ width="79.57225"
+ id="rect787"
+ style="fill:none;stroke:#3771c8;stroke-width:1.5641;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <g
+ id="g765"
+ transform="translate(6.5577393,-7.020317)">
+ <text
+ id="text608"
+ y="31.942825"
+ x="-189.71797"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan606"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="31.942825"
+ x="-189.71797"
+ sodipodi:role="line">dc_link</tspan></text>
+ <rect
+ ry="6.8036792e-07"
+ y="18.197111"
+ x="-211.99069"
+ height="23.523254"
+ width="44.846642"
+ id="rect610"
+ style="fill:none;stroke:#000000;stroke-width:0.630025;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <rect
+ ry="1.0582555e-06"
+ y="4.3160448"
+ x="-210.69141"
+ height="36.588463"
+ width="55.543594"
+ id="rect794"
+ style="fill:none;stroke:#3771c8;stroke-width:0.874443;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <g
+ id="g781"
+ transform="translate(6.5577393,37.542802)">
+ <text
+ id="text777"
+ y="31.942825"
+ x="-189.71797"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan775"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="31.942825"
+ x="-189.71797"
+ sodipodi:role="line">dc_link</tspan></text>
+ <rect
+ ry="6.8036792e-07"
+ y="18.197111"
+ x="-211.99069"
+ height="23.523254"
+ width="44.846642"
+ id="rect779"
+ style="fill:none;stroke:#000000;stroke-width:0.630025;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ <rect
+ ry="1.0582555e-06"
+ y="50.466679"
+ x="-210.69141"
+ height="36.588463"
+ width="55.543594"
+ id="rect796"
+ style="fill:none;stroke:#3771c8;stroke-width:0.874443;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <g
+ id="g2151"
+ transform="translate(2.1659807,-25.895798)">
+ <rect
+ ry="9.2671934e-07"
+ y="29.395185"
+ x="-132.25786"
+ height="32.040688"
+ width="44.742229"
+ id="rect618"
+ style="fill:none;stroke:#3771c8;stroke-width:0.734435;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <g
+ id="g838"
+ transform="translate(1.9073486e-6,0.26687336)">
+ <text
+ id="text616"
+ y="47.132744"
+ x="-110.03735"
+ style="font-style:normal;font-weight:normal;font-size:6.54814px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163704"
+ xml:space="preserve"><tspan
+ id="tspan614"
+ style="text-align:center;text-anchor:middle;stroke-width:0.163704"
+ y="47.132744"
+ x="-110.03735"
+ sodipodi:role="line">dc_link</tspan></text>
+ <rect
+ ry="5.7260945e-07"
+ y="35.249866"
+ x="-126.21788"
+ height="19.797579"
+ width="32.66227"
+ id="rect833"
+ style="fill:none;stroke:#000000;stroke-width:0.493257;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ </g>
+ </g>
+ <rect
+ ry="3.6076738e-06"
+ y="-9.4559708"
+ x="-397.85507"
+ height="124.73286"
+ width="250.94243"
+ id="rect1307"
+ style="fill:none;stroke:#008000;stroke-width:3.43179;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:6.86358, 3.43179;stroke-dashoffset:0" />
+ <rect
+ ry="2.9172609e-06"
+ y="-4.5401988"
+ x="-393.52301"
+ height="100.8623"
+ width="174.14117"
+ id="rect1990"
+ style="fill:none;stroke:#ff0000;stroke-width:2.57074;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:2.57074, 5.14148;stroke-dashoffset:0" />
+ <path
+ style="fill:none;stroke:#aa00d4;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="m -317.69814,47.452094 h -23.80954"
+ id="path2142" />
+ <path
+ style="fill:none;stroke:#aa00d4;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="m -130.71642,19.101665 h -23.80954"
+ id="path2144" />
+ <g
+ aria-label="}"
+ transform="rotate(180,-59.876965,-0.22738225)"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#aa00d4;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ id="text1003-5">
+ <path
+ d="m 92.00239,-21.748413 h 0.86816 c 0,0 15.81267,-0.177767 16.15994,-0.5333 0.35553,-0.355534 1.10026,-1.124479 1.10026,-2.306836 v -20.048953 c 0,-1.289844 0.18603,-2.228288 0.5581,-2.815332 0.37207,-0.587044 0.45004,-0.992187 1.36781,-1.215429 -0.91777,-0.206706 -0.99574,-0.603581 -1.36781,-1.190625 -0.37207,-0.587045 -0.5581,-1.529623 -0.5581,-2.827735 v -19.913761 c 0,-1.174088 -0.74473,-1.938899 -1.10026,-2.294433 -0.34727,-0.363802 -15.00239,-0.545703 -16.15994,-0.545703 h -0.86816 v -1.773536 h 0.78134 c 2.05879,0 17.33403,0.305924 18.02029,0.917774 0.69453,0.60358 1.0418,1.81901 1.0418,3.646289 v 19.814542 c 0,1.231966 0.22324,2.087728 0.66973,2.567285 0.44648,0.471289 1.25677,0.706934 2.43086,0.706934 h 0.76894 v 1.773535 h -0.76894 c -1.17409,0 -1.98438,0.239778 -2.43086,0.719336 -0.44649,0.479557 -0.66973,1.343587 -0.66973,2.59209 v 19.937331 c 0,1.827279 -0.34727,3.046842 -1.0418,3.658691 -0.68626,0.611849 -15.9615,0.917774 -18.02029,0.917774 h -0.78134 z"
+ style="font-size:25.4px;fill:#aa00d4;stroke-width:0.264583"
+ id="path1005-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccsscccsscsccscsscsccscsscscc" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-275.85803"
+ y="92.633011"
+ id="text2157"><tspan
+ sodipodi:role="line"
+ x="-275.85803"
+ y="92.633011"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan2155">DCN</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-279.29822"
+ y="110.19857"
+ id="text3141"><tspan
+ sodipodi:role="line"
+ x="-279.29822"
+ y="110.19857"
+ style="font-weight:bold;font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan3139">SoC</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-275.85803"
+ y="123.8538"
+ id="text3375"><tspan
+ sodipodi:role="line"
+ x="-275.85803"
+ y="123.8538"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan3373">Board/Platform</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-107.57491"
+ y="42.939579"
+ id="text3379"><tspan
+ sodipodi:role="line"
+ x="-107.57491"
+ y="42.939579"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan3377">Display</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-182.71582"
+ y="46.643749"
+ id="text3383"><tspan
+ sodipodi:role="line"
+ x="-182.71582"
+ y="46.643749"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan3381">Connector</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.175px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-182.71582"
+ y="93.210457"
+ id="text3387"><tspan
+ sodipodi:role="line"
+ x="-182.71582"
+ y="93.210457"
+ style="font-size:6.35px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan3385">Connector</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/gpu/amdgpu/display/dc-components.svg b/Documentation/gpu/amdgpu/display/dc-components.svg
new file mode 100644
index 000000000000..f84bb2a57c05
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/dc-components.svg
@@ -0,0 +1,732 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ width="533.42053"
+ height="631.18573"
+ viewBox="0 0 141.13418 167.00122"
+ version="1.1"
+ id="svg8"
+ inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
+ sodipodi:docname="dc-components.svg"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:dc="http://purl.org/dc/elements/1.1/">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616"
+ style="fill:#aa00d4;fill-opacity:1;fill-rule:evenodd;stroke:#aa00d4;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8622"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8592"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8610"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-9"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-8"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-5"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-1"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-4"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-6-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-1-0"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-2-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-9-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path1200-6"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker8858-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path8616-5"
+ style="fill:#00ffcc;fill-opacity:1;fill-rule:evenodd;stroke:#00ffcc;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-3-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-6-56"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-8-0-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path1200-9-3-9"
+ style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4"
+ inkscape:cx="482.85714"
+ inkscape:cy="470"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="3840"
+ inkscape:window-height="2083"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ showguides="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ units="px"
+ inkscape:snap-global="false"
+ inkscape:showpageshadow="2"
+ inkscape:pagecheckerboard="0"
+ inkscape:deskcolor="#d1d1d1" />
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(384.1992,26.608359)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.0511px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.101278"
+ x="-330.72058"
+ y="57.56284"
+ id="text1063" />
+ <rect
+ ry="4.7572436e-07"
+ y="-26.142614"
+ x="-383.73346"
+ height="16.447845"
+ width="140.2027"
+ id="rect744"
+ style="fill:none;stroke:#3771c8;stroke-width:0.93149;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="1.0800992e-06"
+ y="-5.1415901"
+ x="-383.27942"
+ height="37.343693"
+ width="40.239418"
+ id="rect602"
+ style="fill:none;stroke:#000000;stroke-width:0.751929;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.476px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-363.2121"
+ y="17.270189"
+ id="text3379"><tspan
+ sodipodi:role="line"
+ x="-363.2121"
+ y="17.270189"
+ style="font-size:10.476px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan3377">Core</tspan></text>
+ <rect
+ ry="1.0800992e-06"
+ y="-5.1415901"
+ x="-331.06259"
+ height="37.343693"
+ width="40.239418"
+ id="rect526"
+ style="fill:none;stroke:#000000;stroke-width:0.751929;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4701343e-07"
+ y="-5.2654457"
+ x="-286.88507"
+ height="15.455184"
+ width="43.167706"
+ id="rect528"
+ style="fill:none;stroke:#000000;stroke-width:0.501024;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4701343e-07"
+ y="15.68337"
+ x="-286.88507"
+ height="15.455184"
+ width="43.167706"
+ id="rect530"
+ style="fill:none;stroke:#000000;stroke-width:0.501024;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4701343e-07"
+ y="36.959518"
+ x="-286.88507"
+ height="15.455184"
+ width="43.167706"
+ id="rect532"
+ style="fill:none;stroke:#000000;stroke-width:0.501024;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="1.6213723e-06"
+ y="60.089264"
+ x="-286.65378"
+ height="56.057846"
+ width="42.705132"
+ id="rect534"
+ style="fill:none;stroke:#000000;stroke-width:0.949072;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4031123e-07"
+ y="37.077362"
+ x="-382.96875"
+ height="15.223459"
+ width="92.225845"
+ id="rect536"
+ style="fill:none;stroke:#000000;stroke-width:0.726817;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4031123e-07"
+ y="59.989784"
+ x="-382.96875"
+ height="15.223459"
+ width="92.225845"
+ id="rect538"
+ style="fill:none;stroke:#000000;stroke-width:0.726817;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.4031123e-07"
+ y="80.283493"
+ x="-382.96875"
+ height="15.223459"
+ width="92.225845"
+ id="rect540"
+ style="fill:none;stroke:#000000;stroke-width:0.726817;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <rect
+ ry="4.3543034e-07"
+ y="124.89404"
+ x="-382.88803"
+ height="15.054706"
+ width="139.2859"
+ id="rect554"
+ style="fill:none;stroke:#000000;stroke-width:0.888245;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:8.73001px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-311.29712"
+ y="-16.144287"
+ id="text660"><tspan
+ sodipodi:role="line"
+ x="-311.29712"
+ y="-16.144287"
+ style="font-size:8.73001px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan658">Display Core API (dc/dc.h)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:10.476px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-311.40384"
+ y="17.511137"
+ id="text664"><tspan
+ sodipodi:role="line"
+ x="-311.40384"
+ y="17.511137"
+ style="font-size:10.476px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan662">Link</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-336.97806"
+ y="43.095863"
+ id="text668"><tspan
+ sodipodi:role="line"
+ x="-336.97806"
+ y="43.095863"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan666">Hardware Sequencer API</tspan><tspan
+ sodipodi:role="line"
+ x="-336.97806"
+ y="48.552124"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+             id="tspan670">(dc/inc/hw_sequencer.h)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-337.03479"
+ y="68.73642"
+ id="text750"><tspan
+ sodipodi:role="line"
+ x="-337.03479"
+ y="68.73642"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan748">Hardware Sequencer</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-336.98022"
+ y="89.209091"
+ id="text756"><tspan
+ sodipodi:role="line"
+ x="-336.98022"
+ y="89.209091"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan754">Block Level API (dc/inc/hw)</tspan></text>
+ <g
+ id="g1543"
+ transform="matrix(0.61866289,0,0,0.61866289,-146.50941,-10.146755)">
+ <rect
+ ry="7.3007396e-07"
+ y="180.25436"
+ x="-382.5336"
+ height="25.241808"
+ width="29.376135"
+ id="rect542"
+ style="fill:none;stroke:#000000;stroke-width:0.528201;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-367.99722"
+ y="195.3941"
+ id="text838"><tspan
+ sodipodi:role="line"
+ x="-367.99722"
+ y="195.3941"
+ style="font-size:7.05556px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan836">DCHUB</tspan></text>
+ </g>
+ <a
+ id="a1538"
+ transform="matrix(0.61866289,0,0,0.61866289,-154.037,-10.146755)">
+ <rect
+ ry="7.3027257e-07"
+ y="180.25093"
+ x="-339.82092"
+ height="25.248676"
+ width="28.609333"
+ id="rect546"
+ style="fill:none;stroke:#000000;stroke-width:0.521332;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-325.67853"
+ y="195.35883"
+ id="text842"><tspan
+ sodipodi:role="line"
+ x="-325.67853"
+ y="195.35883"
+ style="font-size:7.05556px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan840">HUBP</tspan></text>
+ </a>
+ <g
+ id="g1533"
+ transform="matrix(0.61866289,0,0,0.61866289,-154.69251,-10.146755)">
+ <rect
+ ry="7.3027257e-07"
+ y="180.25093"
+ x="-308.59961"
+ height="25.248676"
+ width="28.609333"
+ id="rect844"
+ style="fill:none;stroke:#000000;stroke-width:0.521332;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-294.45721"
+ y="195.3941"
+ id="text848"><tspan
+ sodipodi:role="line"
+ x="-294.45721"
+ y="195.3941"
+ style="font-size:7.05556px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan846">DPP</tspan></text>
+ </g>
+ <g
+ id="g1528"
+ transform="matrix(0.61866289,0,0,0.61866289,-155.67539,-10.146755)">
+ <rect
+ ry="7.3027257e-07"
+ y="180.25093"
+ x="-276.84912"
+ height="25.248676"
+ width="28.609333"
+ id="rect850"
+ style="fill:none;stroke:#000000;stroke-width:0.521332;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-262.77728"
+ y="195.3941"
+ id="text854"><tspan
+ sodipodi:role="line"
+ x="-262.77728"
+ y="195.3941"
+ style="font-size:7.05556px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan852">MPC</tspan></text>
+ </g>
+ <g
+ id="g1523"
+ transform="matrix(0.61866289,0,0,0.61866289,-157.64019,-10.146755)">
+ <rect
+ ry="7.3027257e-07"
+ y="180.25093"
+ x="-243.51147"
+ height="25.248676"
+ width="28.609333"
+ id="rect856"
+ style="fill:none;stroke:#000000;stroke-width:0.521332;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+ x="-229.2068"
+ y="193.25275"
+ id="text860"><tspan
+ sodipodi:role="line"
+ x="-229.2068"
+ y="193.25275"
+ style="font-size:7.05556px;text-align:center;text-anchor:middle;stroke-width:0.264583"
+ id="tspan858">...</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-313.35858"
+ y="133.55629"
+ id="text951"><tspan
+ sodipodi:role="line"
+ x="-313.35858"
+ y="133.55629"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan949">Hardware Registers</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-265.39505"
+ y="86.926537"
+ id="text1044"><tspan
+ sodipodi:role="line"
+ x="-265.39505"
+ y="86.926537"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1042">DMUB</tspan><tspan
+ sodipodi:role="line"
+ x="-265.39505"
+ y="92.382797"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1046">Block</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-265.42343"
+ y="43.272846"
+ id="text1052"><tspan
+ sodipodi:role="line"
+ x="-265.42343"
+ y="43.272846"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1048">DMUB Hardware API</tspan><tspan
+ sodipodi:role="line"
+ x="-265.42343"
+ y="48.729107"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1050">(dmub/dmub_srv.h)</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-265.40161"
+ y="24.997644"
+ id="text1058"><tspan
+ sodipodi:role="line"
+ x="-265.40161"
+ y="24.997644"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1056">DMUB Service</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.36501px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.163688"
+ x="-265.30121"
+ y="0.99768418"
+ id="text1064"><tspan
+ sodipodi:role="line"
+ x="-265.30121"
+ y="0.99768418"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1062">DMUB Service API</tspan><tspan
+ sodipodi:role="line"
+ x="-265.30121"
+ y="6.4539466"
+ style="font-size:4.36501px;text-align:center;text-anchor:middle;stroke-width:0.163688"
+ id="tspan1066">(dc/dc_dmub_srv.h)</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/gpu/amdgpu/display/dc-debug.rst b/Documentation/gpu/amdgpu/display/dc-debug.rst
index 817631b1dbf3..013f63b271f3 100644
--- a/Documentation/gpu/amdgpu/display/dc-debug.rst
+++ b/Documentation/gpu/amdgpu/display/dc-debug.rst
@@ -2,6 +2,181 @@
Display Core Debug tools
========================
+In this section, you will find helpful information on debugging the amdgpu
+driver from the display perspective. This page introduces debug mechanisms and
+procedures to help you identify whether an issue is related to the display code.
+
+Narrow down display issues
+==========================
+
+Since the display is the driver's visual component, it is common to see users
+reporting problems as display issues when another component is the actual
+cause. This section equips users to determine whether a specific issue was
+caused by the display component or by another part of the driver.
+
+DC dmesg important messages
+---------------------------
+
+The dmesg log is the first source of information to check, and amdgpu takes
+advantage of it by logging valuable information. When investigating issues
+associated with amdgpu, remember that each component of the driver (e.g., SMU,
+PSP, DM, etc.) is loaded one by one, and this sequence is recorded in the
+dmesg log. Look for the part of the log that resembles the snippet below::
+
+ [ 4.254295] [drm] initializing kernel modesetting (IP DISCOVERY 0x1002:0x744C 0x1002:0x0E3B 0xC8).
+ [ 4.254718] [drm] register mmio base: 0xFCB00000
+ [ 4.254918] [drm] register mmio size: 1048576
+ [ 4.260095] [drm] add ip block number 0 <soc21_common>
+ [ 4.260318] [drm] add ip block number 1 <gmc_v11_0>
+ [ 4.260510] [drm] add ip block number 2 <ih_v6_0>
+ [ 4.260696] [drm] add ip block number 3 <psp>
+ [ 4.260878] [drm] add ip block number 4 <smu>
+ [ 4.261057] [drm] add ip block number 5 <dm>
+ [ 4.261231] [drm] add ip block number 6 <gfx_v11_0>
+ [ 4.261402] [drm] add ip block number 7 <sdma_v6_0>
+ [ 4.261568] [drm] add ip block number 8 <vcn_v4_0>
+ [ 4.261729] [drm] add ip block number 9 <jpeg_v4_0>
+ [ 4.261887] [drm] add ip block number 10 <mes_v11_0>
+
+From the above example, you can see the line reporting that `<dm>` (the
+**Display Manager**) was loaded, which means that the display can be part of
+the issue. If you do not see that line, something else failed before amdgpu
+loaded the display component, indicating that the problem is not a display
+issue.
+
+After you have verified that the DM was loaded correctly, you can check the
+display hardware version in use, which can be retrieved from the dmesg log
+with the command::
+
+ dmesg | grep -i 'display core'
+
+This command shows a message that looks like this::
+
+ [ 4.655828] [drm] Display Core v3.2.285 initialized on DCN 3.2
+
+This message has two key pieces of information:
+
+* **The DC version (e.g., v3.2.285)**: Display developers release a new DC
+  version every week, and this information can be advantageous when a
+  user/developer must find a good point versus a bad point based on a tested
+  version of the display code. Remember from the :ref:`Display Core <amdgpu-display-core>`
+  page that every week the new display patches are heavily tested with IGT and
+  manual tests.
+* **The DCN version (e.g., DCN 3.2)**: The DCN block is associated with the
+  hardware generation, and the DCN version conveys the hardware generation that
+  the driver is currently running on. This information helps to narrow down the
+  debug area of the code, since each DCN version has its own files in the DC
+  folder per DCN component (from the example, the developer might want to
+  focus on files/folders/functions/structs with the dcn32 label; see the
+  example command after this list). However, keep in mind that DC reuses code
+  across different DCN versions; for example, it is expected that some
+  callbacks set in one DCN are the same as those in another DCN. In summary,
+  use the DCN version just as a guide.
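+
+One convenient way to enumerate the files carrying a given DCN label (a
+sketch; the exact directory layout varies between kernel versions)::
+
+  grep -rl "dcn32" drivers/gpu/drm/amd/display/dc/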
+
+From the dmesg log, it is also possible to get the ATOM BIOS version by using::
+
+ dmesg | grep -i 'ATOM BIOS'
+
+This command generates output that looks like this::
+
+ [ 4.274534] amdgpu: ATOM BIOS: 113-D7020100-102
+
+This information is worth including in bug reports.
+
+Avoid loading display core
+--------------------------
+
+Sometimes, it might be hard to figure out which part of the driver is causing
+the issue; if you suspect that the display is not part of the problem and your
+bug scenario is simple (e.g., some desktop configuration), you can try to
+remove the display component from the equation. First, identify the `dm` ID
+from the dmesg log; for example, search for the following lines::
+
+ [ 4.254295] [drm] initializing kernel modesetting (IP DISCOVERY 0x1002:0x744C 0x1002:0x0E3B 0xC8).
+ [..]
+ [ 4.260095] [drm] add ip block number 0 <soc21_common>
+ [ 4.260318] [drm] add ip block number 1 <gmc_v11_0>
+ [..]
+ [ 4.261057] [drm] add ip block number 5 <dm>
+
+Notice from the above example that the `dm` ID is 5 for this specific
+hardware. Next, compute the following bitwise operation to obtain the IP
+block mask::
+
+ 0xffffffff & ~(1 << [DM ID])
+
+From our example, the IP block mask is::
+
+ 0xffffffff & ~(1 << 5) = 0xffffffdf
+
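+The same mask can also be computed with shell arithmetic; the snippet below
+is just a convenience sketch, assuming the `dm` ID of 5 from the example
+above::
+
+  DM_ID=5
+  printf 'amdgpu.ip_block_mask=0x%x\n' $(( 0xffffffff & ~(1 << DM_ID) ))
+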
+Finally, to disable DC, set the parameter below on the kernel command line
+via your bootloader::
+
+ amdgpu.ip_block_mask=0xffffffdf
+
+If you can boot your system with DC disabled and still see the issue, you can
+rule DC out of the equation. However, if the bug disappears, you should still
+consider DC part of the problem and keep narrowing down the issue. In some
+scenarios, disabling DC is impossible since it might be necessary to use the
+display component to reproduce the issue (e.g., playing a game).
+
+**Note: Disabling DC will probably leave your system without display output.**
+
+Display flickering
+------------------
+
+Display flickering might have multiple causes; one is the lack of proper
+power to the GPU or problems in the DPM switching. A good first generic check
+is to force the GPU to its highest performance level::
+
+ bash -c "echo high > /sys/class/drm/card0/device/power_dpm_force_performance_level"
+
+The above command sets the GPU/APU to use the maximum power allowed, which
+disables DPM switching. If forcing DPM levels high does not fix the issue, it
+is less likely that the issue is related to power management. If the issue
+disappears, there is a good chance that other components are involved, but the
+display should not be ruled out since this could be a DPM issue. From the
+display side, if the power increase fixes the issue, it is worth debugging the
+clock configuration and the pipe split policy used in the specific
+configuration.
+
+Display artifacts
+-----------------
+
+Users may see some screen artifacts that can be categorized into two different
+types: localized artifacts and general artifacts. The localized artifacts
+happen in some specific areas, such as around the UI window corners; if you see
+this type of issue, there is a considerable chance that you have a userspace
+problem, likely Mesa or similar. The general artifacts usually happen on the
+entire screen. They might be caused by a misconfiguration at the driver level
+of the display parameters, but the userspace might also cause this issue. One
+way to identify the source of the problem is to take a screenshot or make a
+desktop video capture when the problem happens; after checking the
+screenshot/video recording, if you don't see any of the artifacts, it means
+that the issue is likely on the driver side. If you can still see the
+problem in the data collected, it is an issue that probably happened during
+rendering, and the display code just got the framebuffer already corrupted.
+
+Disabling/Enabling specific features
+====================================
+
+DC has a struct named `dc_debug_options`, which is statically initialized by
+all DCE/DCN components based on the specific hardware characteristics. This
+structure usually facilitates the bring-up phase, since developers can start
+with many features disabled and enable them individually. It is also an
+important debug feature, since users can change it when debugging specific
+issues.
+
+For example, dGPU users sometimes see a horizontal band of flickering in some
+specific part of the screen. This can be an indication of Sub-Viewport issues;
+after identifying the target DCN, users can set the `force_disable_subvp`
+field to true in the statically initialized version of `dc_debug_options` to
+see if the issue gets fixed. Along the same lines, users/developers can also
+try to turn off `fams2_config` and `enable_single_display_2to1_odm_policy`. In
+summary, `dc_debug_options` is a useful mechanism for narrowing down problems.
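+
+A minimal, hypothetical sketch of such a change in the resource file of the
+target DCN (the exact set of fields in `dc_debug_options` differs between DCN
+versions, so treat this as an illustration only)::
+
+  static const struct dc_debug_options debug_defaults_drv = {
+          /* ... other fields elided ... */
+          .force_disable_subvp = true,  /* flipped to rule out Sub-Viewport */
+  };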
+
DC Visual Confirmation
======================
@@ -76,6 +251,18 @@ change in real-time by using something like::
When reporting a bug related to DC, consider attaching this log before and
after you reproduce the bug.
+Collect Firmware information
+============================
+
+When reporting issues, it is important to include the firmware information,
+since it can be helpful for debugging. To get all the firmware information,
+use the command::
+
+ cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
+
+From the display perspective, pay attention to the firmware of the DMCU and
+DMCUB.
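+
+For example, a quick way to filter for those entries (assuming card 0)::
+
+  grep -i dmcu /sys/kernel/debug/dri/0/amdgpu_firmware_info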
+
DMUB Firmware Debug
===================
diff --git a/Documentation/gpu/amdgpu/display/dcn-blocks.rst b/Documentation/gpu/amdgpu/display/dcn-blocks.rst
index 5e34366f6dbe..756957128dad 100644
--- a/Documentation/gpu/amdgpu/display/dcn-blocks.rst
+++ b/Documentation/gpu/amdgpu/display/dcn-blocks.rst
@@ -1,3 +1,5 @@
+.. _dcn_blocks:
+
==========
DCN Blocks
==========
diff --git a/Documentation/gpu/amdgpu/display/dcn-overview.rst b/Documentation/gpu/amdgpu/display/dcn-overview.rst
index 9fea6500448b..eb54a6802e04 100644
--- a/Documentation/gpu/amdgpu/display/dcn-overview.rst
+++ b/Documentation/gpu/amdgpu/display/dcn-overview.rst
@@ -1,3 +1,5 @@
+.. _dcn_overview:
+
=======================
Display Core Next (DCN)
=======================
diff --git a/Documentation/gpu/amdgpu/display/index.rst b/Documentation/gpu/amdgpu/display/index.rst
index f0c342e00a39..bd2d797c123e 100644
--- a/Documentation/gpu/amdgpu/display/index.rst
+++ b/Documentation/gpu/amdgpu/display/index.rst
@@ -90,6 +90,7 @@ table of content:
display-manager.rst
dcn-overview.rst
dcn-blocks.rst
+ programming-model-dcn.rst
mpo-overview.rst
dc-debug.rst
display-contributing.rst
diff --git a/Documentation/gpu/amdgpu/display/programming-model-dcn.rst b/Documentation/gpu/amdgpu/display/programming-model-dcn.rst
new file mode 100644
index 000000000000..c1b48d49fb0b
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/programming-model-dcn.rst
@@ -0,0 +1,162 @@
+====================
+DC Programming Model
+====================
+
+In the :ref:`Display Core Next (DCN) <dcn_overview>` and :ref:`DCN Blocks
+<dcn_blocks>` pages, you learned about the hardware components and how they
+interact with each other. On this page, the focus shifts to the display code
+architecture. Hence, it is worth reminding the reader that the DC code is
+shared with other OSes; for this reason, DC provides a set of abstractions and
+operations to connect different APIs with the hardware configuration. Think of
+DC as a service available for a Display Manager (amdgpu_dm) to access and
+configure DCN/DCE hardware (DCE is also part of DC, but for simplicity's sake,
+this documentation only examines DCN).
+
+.. note::
+   For this page, we use the term GPU to refer to both dGPU and APU.
+
+Overview
+========
+
+From the display hardware perspective, it is plausible to expect that if a
+problem is well-defined, its solution will probably be implemented at the
+hardware level. On the other hand, when there are multiple ways of achieving
+something without a very well-defined scope, the solution is usually
+implemented as a policy at the DC level. In other words, some policies are
+defined in the DC core (resource management, power optimization, image
+quality, etc.), while others are implemented in hardware and enabled via DC
+configuration.
+
+In terms of hardware management, DCN has multiple instances of the same block
+(e.g., HUBP, DPP, MPC, etc.), and during the driver execution, it might be
+necessary to use some of these instances. The core has policies in place for
+handling those instances. Regarding resource management, the DC objective is
+quite simple: minimize the hardware shuffle when the driver performs some
+actions. When the state changes from A to B, the transition is considered
+easier to maneuver if the hardware resource is still used for the same set of
+driver objects. Usually, adding and removing a resource to a `pipe_ctx` (more
+details below) is not a problem; however, moving a resource from one `pipe_ctx`
+to another should be avoided.
+
+Another area of influence for DC is power optimization, which has a myriad of
+arrangement possibilities. In some ways, just displaying an image via DCN
+should be relatively straightforward; however, showing it with the best power
+footprint is more desirable but brings many associated challenges.
+Unfortunately, there is no straightforward analytic way to determine whether a
+configuration is the best one for the context, due to the enormous variety of
+variables related to this problem (e.g., many different DCN/DCE hardware
+versions, different display configurations, etc.). For this reason, DC
+implements a dedicated library for trying a configuration and verifying
+whether it can be supported. This type of policy is extremely complex to
+create and maintain, and the amdgpu driver relies on the Display Mode Library
+(DML) to generate the best decisions.
+
+In summary, DC must deal with the complexity of handling multiple scenarios
+and must define policies to manage them. All of the above is meant to give the
+reader some idea about the complexity of driving a display from the driver's
+perspective and to help the reader better navigate the amdgpu display code.
+
+Display Driver Architecture Overview
+====================================
+
+The diagram below provides an overview of the display driver architecture;
+notice that it illustrates the software layers adopted by DC:
+
+.. kernel-figure:: dc-components.svg
+
+The first layer of the diagram is the high-level DC API represented by the
+`dc.h` file; below it are two big blocks represented by Core and Link. Next is
+the hardware configuration block, whose main descriptor is the
+`hw_sequencer.h` file; the implementation of its callbacks can be found in the
+hardware sequencer folder. Almost at the end, you can see the block-level API
+(`dc/inc/hw`), which represents each DCN low-level block, such as HUBP, DPP,
+MPC, OPTC, etc. Notice on the left side of the diagram that we have a
+different set of layers representing the interaction with the DMUB
+microcontroller.
+
+Basic Objects
+-------------
+
+The diagram below outlines the basic display objects. In particular, pay
+attention to the names in the boxes, since each represents a data structure in
+the driver:
+
+.. kernel-figure:: dc-arch-overview.svg
+
+Let's start with the central block in the image, `dc`. The `dc` struct is
+initialized per GPU; for example, one GPU has one `dc` instance, two GPUs have
+two `dc` instances, and so forth. In other words, we have one `dc` per
+`amdgpu` instance. In some ways, this object behaves like a singleton.
+
+After the `dc` block in the diagram, you can see the `dc_link` component, which
+is a low-level abstraction for the connector. One interesting aspect of the
+image is that connectors are not part of the DCN block; they are defined by the
+platform/board and not by the SoC. The `dc_link` struct is the high-level data
+container with information such as connected sinks, connection status, signal
+types, etc. After `dc_link`, there is the `dc_sink`, which is the object that
+represents the connected display.
+
+.. note::
+   For historical reasons, we used the name `dc_link`, which gives the wrong
+   impression that this abstraction only deals with physical connections that
+   the developer can easily manipulate. However, it also covers connections
+   like eDP or cases where the output is connected to other devices.
+
+There are two structs that are not represented in the diagram since they were
+elaborated on the DCN overview page (check the DCN block diagram in
+:ref:`Display Core Next (DCN) <dcn_overview>`); still, they are worth bringing
+back for this overview: `dc_stream` and `dc_state`. The `dc_stream` stores
+many properties associated with the data transmission, but most importantly,
+it represents the data flow from the connector to the display. Next we have
+`dc_state`, which represents the logical state within the hardware at a given
+moment; `dc_state` is composed of `dc_stream` and `dc_plane`. The `dc_stream`
+is the DC version of `drm_crtc` and represents the post-blending pipeline.
+
+Speaking of the `dc_plane` data structure (first part of the diagram), you
+can think of it as an abstraction similar to `drm_plane`; it represents the
+pre-blending portion of the pipeline. The image it carries was probably
+processed by GFX and is ready to be composited under a `dc_stream`. The driver
+may have one or more `dc_plane` objects connected to the same `dc_stream`,
+which defines a composition at the DC level.
+
+Basic Operations
+----------------
+
+Now that we have covered the basic objects, it is time to examine some of the
+basic hardware/software operations. Let's start with the `dc_create()`
+function, which works directly with the `dc` data struct; this function
+behaves like a constructor responsible for the basic software initialization,
+preparing to enable other parts of the API. It is important to highlight that
+this operation does not touch any hardware configuration; it is only a
+software initialization.
+
+Next, we have `dc_hardware_init()`, which also relies on the `dc` data
+struct. Its main job is to put the hardware in a valid state. It is worth
+highlighting that the hardware might power up in an unknown state, and
+bringing it to a valid state is a requirement; this function invokes multiple
+callbacks for the hardware-specific initialization, and it is the first point
+where we touch hardware.
+
+The `dc_get_link_at_index()` operation depends on the `dc_link` data
+structure. This function retrieves and enumerates all the `dc_links` available
+on the device; this is required since this information is not part of the SoC
+definition but depends on the board configuration. As soon as the `dc_link`
+objects are initialized, it is useful to figure out whether any of them are
+already connected to a display by using the `dc_link_detect()` function. After
+the driver figures out whether any display is connected to the device, the
+challenging phase starts: configuring the monitor to show something.
+Nonetheless, choosing the ideal configuration is not a DC task; it is the
+responsibility of the Display Manager (`amdgpu_dm`), which in turn handles the
+atomic commits. The only interface DC provides to the configuration phase is
+the function `dc_validate_with_context()`, which receives the configuration
+information and, based on that, validates whether the hardware can support it.
+It is important to add that even if the display supports some specific
+configuration, it does not mean the DCN hardware can support it.
+
+After the DM and DC agree upon the configuration, the stream configuration
+phase starts. This phase activates one or more `dc_stream` objects, and in
+the best-case scenario, you might be able to turn the display on with a black
+screen (it does not show anything yet since it does not have any plane
+associated with it). The final step is to call
+`dc_update_planes_and_stream()`, which adds or removes planes.
+
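+Putting it all together, a rough sketch of the flow described above
+(signatures simplified and error handling omitted; treat this as pseudocode
+rather than the exact DC API)::
+
+  struct dc *dc = dc_create(&init_params);  /* software-only setup */
+
+  dc_hardware_init(dc);                     /* first hardware access */
+
+  for (int i = 0; i < dc->link_count; i++) {
+          struct dc_link *link = dc_get_link_at_index(dc, i);
+
+          if (!dc_link_detect(link, DETECT_REASON_BOOT))
+                  continue;
+
+          /* The DM builds a candidate configuration and asks DC whether
+           * the hardware can support it. */
+          if (dc_validate_with_context(...) != DC_OK)
+                  continue;
+
+          /* Activate the stream(s) (display lights up black), then
+           * attach or remove planes. */
+          dc_update_planes_and_stream(...);
+  }
+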
diff --git a/Documentation/gpu/drm-usage-stats.rst b/Documentation/gpu/drm-usage-stats.rst
index 566e122e6e60..b43cd47f3bb8 100644
--- a/Documentation/gpu/drm-usage-stats.rst
+++ b/Documentation/gpu/drm-usage-stats.rst
@@ -149,7 +149,9 @@ Memory
Each possible memory type which can be used to store buffer objects by the
GPU in question shall be given a stable and unique name to be returned as the
-string here. The name "memory" is reserved to refer to normal system memory.
+string here.
+
+The region name "memory" is reserved to refer to normal system memory.
Value shall reflect the amount of storage currently consumed by the buffer
objects belong to this client, in the respective memory region.
@@ -157,6 +159,9 @@ objects belong to this client, in the respective memory region.
Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB'
indicating kibi- or mebi-bytes.
+This key is deprecated and is an alias for drm-resident-<region>. Only one of
+the two should be present in the output.
+
- drm-shared-<region>: <uint> [KiB|MiB]
The total size of buffers that are shared with another file (e.g., have more
@@ -164,20 +169,34 @@ than a single handle).
- drm-total-<region>: <uint> [KiB|MiB]
-The total size of buffers that including shared and private memory.
+The total size of all created buffers, including shared and private memory.
+The backing store for the buffers does not have to be currently instantiated
+to be counted under this category.
- drm-resident-<region>: <uint> [KiB|MiB]
-The total size of buffers that are resident in the specified region.
+The total size of buffers that are resident (have their backing store present or
+instantiated) in the specified region.
+
+This is an alias for drm-memory-<region> and only one of the two should be
+present in the output.
- drm-purgeable-<region>: <uint> [KiB|MiB]
The total size of buffers that are purgeable.
+For example, drivers which implement a form of 'madvise'-like functionality
+can count buffers here which have an instantiated backing store but have been
+marked with an equivalent of MADV_DONTNEED.
+
- drm-active-<region>: <uint> [KiB|MiB]
The total size of buffers that are active on one or more engines.
+One practical example of this is the presence of unsignaled fences in a GEM
+buffer's reservation object. The active category is therefore a subset of the
+resident category.
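+
+To make the relationship between the categories concrete, a hypothetical
+fdinfo excerpt for a single region could look like this (region name and
+values are invented for illustration; note active <= resident <= total)::
+
+  drm-total-vram:     512 MiB
+  drm-shared-vram:     64 MiB
+  drm-resident-vram:  256 MiB
+  drm-purgeable-vram:  16 MiB
+  drm-active-vram:    128 MiB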
+
Implementation Details
======================
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index b0f95a7649bf..3a588fecb0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -85,16 +85,9 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -246,7 +239,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to get BIF handle\n");
return -EINVAL;
}
- r = cmn_block->version->funcs->resume(adev);
+ r = amdgpu_ip_block_resume(cmn_block);
if (r)
return r;
@@ -282,15 +275,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- adev->ip_blocks[i].status.hw = true;
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -304,7 +292,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
@@ -417,6 +405,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = {
static struct amdgpu_reset_handler
*aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
&aldebaran_mode2_handler,
+ &xgmi_reset_on_init_handler,
};
int aldebaran_reset_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9b1e0ede05a4..3af5acff8518 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -131,10 +131,6 @@ struct amdgpu_mgpu_info {
uint32_t num_gpu;
uint32_t num_dgpu;
uint32_t num_apu;
-
- /* delayed reset_func for XGMI configuration if necessary */
- struct delayed_work delayed_reset_work;
- bool pending_reset;
};
enum amdgpu_ss {
@@ -365,8 +361,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
#define AMDGPU_MAX_IP_NUM 16
@@ -389,6 +388,7 @@ struct amdgpu_ip_block_version {
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
+ struct amdgpu_device *adev;
};
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
@@ -563,6 +563,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE2,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
+ AMD_RESET_METHOD_ON_INIT,
};
struct amdgpu_video_codec_info {
@@ -821,6 +822,24 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+/*
+ * Custom init levels could be defined for different situations where a full
+ * initialization of all hardware blocks is not expected. Sample cases are
+ * custom init sequences after resume after S0i3/S3, reset on initialization,
+ * partial reset of blocks etc. Presently, this defines only two levels. Levels
+ * are described in corresponding struct definitions - amdgpu_init_default,
+ * amdgpu_init_minimal_xgmi.
+ */
+enum amdgpu_init_lvl_id {
+ AMDGPU_INIT_LEVEL_DEFAULT,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+};
+
+struct amdgpu_init_level {
+ enum amdgpu_init_lvl_id level;
+ uint32_t hwini_ip_block_mask;
+};
+
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_reset_domain;
@@ -1166,6 +1185,8 @@ struct amdgpu_device {
bool enforce_isolation[MAX_XCP];
/* Added this mutex for cleaner shader isolation between GFX and compute processes */
struct mutex enforce_isolation_mutex;
+
+ struct amdgpu_init_level *init_lvl;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1261,6 +1282,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context);
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
+
int emu_soc_asic_init(struct amdgpu_device *adev);
/*
@@ -1450,23 +1473,15 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
-bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
-static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
-#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void);
-#else
-static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
-#endif
-
/*
* KMS
*/
@@ -1619,4 +1634,6 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index bf6c4a0d0525..ec5e0dcf8613 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -98,9 +98,9 @@ enum {
ACP_TILE_DSP2,
};
-static int acp_sw_init(void *handle)
+static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->acp.parent = adev->dev;
@@ -112,9 +112,9 @@ static int acp_sw_init(void *handle)
return 0;
}
-static int acp_sw_fini(void *handle)
+static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->acp.cgs_device)
amdgpu_cgs_destroy_device(adev->acp.cgs_device);
@@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = {
/**
* acp_hw_init - start and test ACP block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_init(void *handle)
+static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
u64 acp_base;
@@ -230,13 +230,7 @@ static int acp_hw_init(void *handle)
u32 count = 0;
struct i2s_platform_data *i2s_pdata = NULL;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
-
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_block->version->major, ip_block->version->minor);
@@ -503,14 +497,14 @@ failure:
/**
* acp_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
u32 val = 0;
u32 count = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
@@ -565,9 +559,9 @@ static int acp_hw_fini(void *handle)
return 0;
}
-static int acp_suspend(void *handle)
+static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power up on suspend */
if (!adev->acp.acp_cell)
@@ -575,9 +569,9 @@ static int acp_suspend(void *handle)
return 0;
}
-static int acp_resume(void *handle)
+static int acp_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power down again on resume */
if (!adev->acp.acp_cell)
@@ -585,26 +579,11 @@ static int acp_resume(void *handle)
return 0;
}
-static int acp_early_init(void *handle)
-{
- return 0;
-}
-
static bool acp_is_idle(void *handle)
{
return true;
}
-static int acp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int acp_soft_reset(void *handle)
-{
- return 0;
-}
-
static int acp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -624,8 +603,6 @@ static int acp_set_powergating_state(void *handle,
static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
- .early_init = acp_early_init,
- .late_init = NULL,
.sw_init = acp_sw_init,
.sw_fini = acp_sw_fini,
.hw_init = acp_hw_init,
@@ -633,12 +610,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.suspend = acp_suspend,
.resume = acp_resume,
.is_idle = acp_is_idle,
- .wait_for_idle = acp_wait_for_idle,
- .soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version acp_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..cce85389427f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
+ /* Fail if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
@@ -791,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
return -EIO;
}
+ kfree(info);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4f08b153cb66..b545940e512b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -889,3 +889,18 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
return kgd2kfd_start_sched(adev->kfd.dev, node_id);
}
+
+/* Config CGTT_SQ_CLK_CTRL */
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
+{
+ int r;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
+ reg_override_enable, perfmon_override_enable);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f9d119448442..7e0a22072536 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -266,6 +266,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 3bc0cbf45bc5..cc66ebb7bae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
*
* @adev: Handle of device whose registers are to be read
* @queue_idx: Index of queue in the queue-map bit-field
- * @wave_cnt: Output parameter updated with number of waves in flight
- * @vmid: Output parameter updated with VMID of queue whose wave count
- * is being collected
+ * @queue_cnt: Stores the wave count and doorbell offset for an active queue
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
@@ -1133,10 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
- if (!adev->debug_exp_resets &&
- !adev->gfx.num_gfx_rings)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ce5ca304dba9..f30548f4c3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
-
- *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+ if (ef)
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
mutex_unlock(&vm->process_info->lock);
return 0;
@@ -2524,11 +2524,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
/* First eviction, stop the queues */
r = kgd2kfd_quiesce_mm(mni->mm,
KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
- if (r)
+
+ if (r && r != -ESRCH)
pr_err("Failed to quiesce KFD\n");
- queue_delayed_work(system_freezable_wq,
- &process_info->restore_userptr_work,
- msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+
+ if (r != -ESRCH)
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
+ msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
mutex_unlock(&process_info->notifier_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 0c8975ac5af9..093141ad6ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1145,8 +1145,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock)
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
@@ -1161,8 +1161,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ return amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
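The atombios change converts set_engine_dram_timings() from void to int so the status of the atom table execution reaches callers instead of being dropped. A small sketch of the shape of that change, assuming a hypothetical execute_table() in place of amdgpu_atom_execute_table():

#include <errno.h>
#include <stdio.h>

/* Stand-in for amdgpu_atom_execute_table(); the real function takes a
 * context, a table index, and an argument buffer. */
static int execute_table(int index)
{
	return index >= 0 ? 0 : -EINVAL;
}

/* Returned void before this change, so a failed table execution was
 * silently swallowed; returning int lets callers act on the status. */
static int set_engine_dram_timings(int index)
{
	return execute_table(index);
}

int main(void)
{
	if (set_engine_dram_timings(-1))
		fprintf(stderr, "table execution failed\n");
	return 0;
}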
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0811474e8fd3..0e16432d9a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,8 +163,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock);
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock);
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 375f02002579..3893e6fc2f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void)
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void)
-{
- return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
-}
-
-#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void)
-{
- return amdgpu_atpx_priv.dhandle;
-}
-#endif
-
/**
* amdgpu_atpx_call - call an ATPX method
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1e475eb01417..d891ab779ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
- ret = -EINVAL;
+ goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cbef720de779..37d8657f0776 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2095,6 +2095,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+ amdgpu_debugfs_jpeg_sched_mask_init(adev);
+
amdgpu_ras_debugfs_create_all(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 5ac59b62020c..946c48829f19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
+ struct amdgpu_ip_block *ip_block;
int ver;
iter.data = buffer;
@@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
/* dump the ip state for each ip */
drm_printf(&p, "IP Dump\n");
for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
- if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
- drm_printf(&p, "IP: %s\n",
- coredump->adev->ip_blocks[i]
- .version->funcs->name);
- coredump->adev->ip_blocks[i]
- .version->funcs->print_ip_state(
- (void *)coredump->adev, &p);
+ ip_block = &coredump->adev->ip_blocks[i];
+ if (ip_block->version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name);
+ ip_block->version->funcs->print_ip_state(ip_block, &p);
drm_printf(&p, "\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 128e086b4251..be5fb87ed5cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -145,6 +145,51 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+ .level = AMDGPU_INIT_LEVEL_DEFAULT,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal set of blocks to be initialized before an XGMI hive can be reset. This
+ * is used for cases like reset on initialization where the entire hive needs to
+ * be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+ .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ .hwini_ip_block_mask =
+ BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+ BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+ BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+ enum amd_ip_block_type block)
+{
+ return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl)
+{
+ switch (lvl) {
+ case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+ adev->init_lvl = &amdgpu_init_minimal_xgmi;
+ break;
+ case AMDGPU_INIT_LEVEL_DEFAULT:
+ fallthrough;
+ default:
+ adev->init_lvl = &amdgpu_init_default;
+ break;
+ }
+}
+
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
/**
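The init-level machinery above gates hardware init on a per-IP bitmask: a block is brought up only if its bit is set in the active level's hwini_ip_block_mask, and the minimal XGMI level whitelists just GMC, SMC, COMMON, IH and PSP. A self-contained sketch of that membership test, using a simplified stand-in enum rather than the real amd_ip_block_type values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(x) (1U << (x))

/* Simplified stand-ins for amd_ip_block_type; the real enum has more
 * entries and different values. */
enum ip_block_type { IP_COMMON, IP_GMC, IP_IH, IP_SMC, IP_PSP, IP_GFX, IP_SDMA, IP_NUM };

static const uint32_t minimal_xgmi_mask =
	BIT(IP_GMC) | BIT(IP_SMC) | BIT(IP_COMMON) | BIT(IP_IH) | BIT(IP_PSP);

/* Mirrors amdgpu_ip_member_of_hwini(): hw init runs for a block only
 * when its bit is set in the current init level's mask. */
static bool member_of_hwini(uint32_t mask, enum ip_block_type t)
{
	return (mask & BIT(t)) != 0;
}

int main(void)
{
	static const char * const names[IP_NUM] =
		{ "COMMON", "GMC", "IH", "SMC", "PSP", "GFX", "SDMA" };

	for (int t = 0; t < IP_NUM; t++)
		printf("%-6s hw init: %s\n", names[t],
		       member_of_hwini(minimal_xgmi_mask, t) ? "yes" : "skipped");
	return 0;
}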
@@ -228,6 +273,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->suspend) {
+ r = ip_block->version->funcs->suspend(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = false;
+ return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->resume) {
+ r = ip_block->version->funcs->resume(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = true;
+ return 0;
+}
+
/**
* DOC: board_info
*
@@ -1656,7 +1737,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
/* Don't post if we need to reset whole hive on init */
- if (adev->gmc.xgmi.pending_reset)
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
return false;
if (adev->has_hw_reset) {
@@ -2160,9 +2241,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
- if (r)
- return r;
+ if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+ &adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
break;
}
}
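The wait_for_idle hunk adds a NULL check before invoking the per-IP hook, since after the ip_block rework not every block has to implement it. The same optional-callback pattern in a standalone sketch, with a simplified ops table and hypothetical names:

#include <stdio.h>

struct ip_block;

/* Per-IP callback table; wait_for_idle is optional, matching the
 * driver's treatment after this series. */
struct ip_funcs {
	const char *name;
	int (*wait_for_idle)(struct ip_block *block);
};

struct ip_block {
	const struct ip_funcs *funcs;
};

static int gfx_wait_for_idle(struct ip_block *block)
{
	printf("%s: waiting for idle\n", block->funcs->name);
	return 0;
}

static const struct ip_funcs gfx_funcs = { "gfx", gfx_wait_for_idle };
static const struct ip_funcs ih_funcs  = { "ih",  NULL };  /* no hook */

static int wait_for_idle(struct ip_block *block)
{
	/* NULL-check before the call, as the reworked loop does. */
	if (block->funcs->wait_for_idle)
		return block->funcs->wait_for_idle(block);
	return 0;
}

int main(void)
{
	struct ip_block gfx = { &gfx_funcs }, ih = { &ih_funcs };

	return wait_for_idle(&gfx) || wait_for_idle(&ih);
}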
@@ -2171,26 +2255,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_idle - is the hardware IP idle
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
- * Check if the hardware IP is idle or not.
- * Returns true if it the IP is idle, false if not.
+ * Check if the hardware IP is enabled or not.
+ * Returns true if the IP is enabled, false if not.
*/
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
if (adev->ip_blocks[i].version->type == block_type)
- return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+ return adev->ip_blocks[i].status.valid;
}
- return true;
+ return false;
}
@@ -2272,6 +2354,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
ip_block_version->funcs->name);
+ adev->ip_blocks[adev->num_ip_blocks].adev = adev;
+
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
return 0;
@@ -2567,25 +2651,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ ip_block = &adev->ip_blocks[i];
+
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_WARN("disabled ip block: %d <%s>\n",
i, adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
- } else {
- if (adev->ip_blocks[i].version->funcs->early_init) {
- r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
- if (r == -ENOENT) {
- adev->ip_blocks[i].status.valid = false;
- } else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- total = false;
- } else {
- adev->ip_blocks[i].status.valid = true;
- }
+ } else if (ip_block->version->funcs->early_init) {
+ r = ip_block->version->funcs->early_init(ip_block);
+ if (r == -ENOENT) {
+ adev->ip_blocks[i].status.valid = false;
+ } else if (r) {
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
+ } else {
+ adev->ip_blocks[i].status.valid = true;
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
@@ -2634,10 +2718,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
(amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2659,7 +2746,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2682,6 +2772,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
+ if (!amdgpu_ip_member_of_hwini(adev,
+ AMD_IP_BLOCK_TYPE_PSP))
+ break;
+
if (!adev->ip_blocks[i].status.sw)
continue;
@@ -2690,22 +2784,18 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
break;
if (amdgpu_in_reset(adev) || adev->in_suspend) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
} else {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
-
- adev->ip_blocks[i].status.hw = true;
break;
}
}
@@ -2787,6 +2877,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
+ bool init_badpage;
int i, r;
r = amdgpu_ras_init(adev);
@@ -2796,17 +2887,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
- if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- goto init_failed;
+ if (adev->ip_blocks[i].version->funcs->sw_init) {
+ r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
+ if (r) {
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ goto init_failed;
+ }
}
adev->ip_blocks[i].status.sw = true;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
/* need to do common hw init early so everything is set up for gmc */
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
@@ -2823,7 +2920,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
goto init_failed;
}
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
@@ -2896,7 +2993,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
* Note: theoretically, this should be called before all vram allocations
* to protect retired page from abusing
*/
- r = amdgpu_ras_recovery_init(adev);
+ init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+ r = amdgpu_ras_recovery_init(adev, init_badpage);
if (r)
goto init_failed;
@@ -2936,7 +3034,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
/* Don't init kfd if whole hive need to be reset during init */
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
}
@@ -3136,7 +3234,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->funcs->late_init) {
- r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("late_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -3207,6 +3305,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (!ip_block->version->funcs->hw_fini) {
+ DRM_ERROR("hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
+ } else {
+ r = ip_block->version->funcs->hw_fini(ip_block);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ }
+ }
+
+ ip_block->status.hw = false;
+}
+
/**
* amdgpu_device_smu_fini_early - smu hw_fini wrapper
*
@@ -3216,7 +3333,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
*/
static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
return;
@@ -3225,13 +3342,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
break;
}
}
@@ -3245,7 +3356,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
- r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -3264,14 +3375,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
-
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
}
if (amdgpu_sriov_vf(adev)) {
@@ -3317,12 +3421,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
}
-
- r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ if (adev->ip_blocks[i].version->funcs->sw_fini) {
+ r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
}
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
@@ -3332,7 +3437,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
- adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
}
@@ -3404,15 +3509,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -3450,14 +3549,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
/* skip unnecessary suspend if we do not initialize them yet */
- if (adev->gmc.xgmi.pending_reset &&
- !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
- adev->ip_blocks[i].status.hw = false;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
continue;
- }
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
@@ -3491,13 +3585,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.hw = false;
+
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
@@ -3571,7 +3661,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
!block->status.valid)
continue;
- r = block->version->funcs->hw_init(adev);
+ r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
@@ -3610,15 +3700,19 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
block->status.hw)
continue;
- if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
- r = block->version->funcs->resume(adev);
- else
- r = block->version->funcs->hw_init(adev);
-
- DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
- if (r)
- return r;
- block->status.hw = true;
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
+ } else {
+ r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ block->status.hw = true;
+ }
}
}
@@ -3649,13 +3743,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = true;
}
}
@@ -3687,13 +3777,9 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -4194,6 +4280,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_set_mcbp(adev);
+ /*
+ * By default, use the level at which all blocks are expected to be
+ * initialized. At present, 'swinit' of all blocks must complete before
+ * the need for a different level can be detected.
+ */
+ amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
@@ -4266,20 +4358,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
if (adev->gmc.xgmi.num_physical_nodes) {
dev_info(adev->dev, "Pending hive reset.\n");
- adev->gmc.xgmi.pending_reset = true;
- /* Only need to init necessary block for SMU to handle the reset */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
- if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
- DRM_DEBUG("IP %s disabled for hw_init.\n",
- adev->ip_blocks[i].version->funcs->name);
- adev->ip_blocks[i].status.hw = true;
- }
- }
+ amdgpu_set_init_level(adev,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev)) {
r = psp_gpu_reset(adev);
@@ -4387,7 +4467,7 @@ fence_driver_init:
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
r = amdgpu_device_ip_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
@@ -4437,6 +4517,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_cfg_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4464,9 +4545,8 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->gmc.xgmi.pending_reset)
- queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
amdgpu_device_check_iommu_direct_map(adev);
@@ -4560,6 +4640,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4695,7 +4776,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
continue;
- r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
goto unprepare;
}
@@ -4899,7 +4980,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
adev->ip_blocks[i].status.hang =
- adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ adev->ip_blocks[i].version->funcs->check_soft_reset(
+ &adev->ip_blocks[i]);
if (adev->ip_blocks[i].status.hang) {
dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
@@ -4928,7 +5010,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -4990,7 +5072,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->soft_reset) {
- r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5019,7 +5101,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->post_soft_reset)
- r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5310,7 +5392,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < tmp_adev->num_ip_blocks; i++)
if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
+ ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
}
@@ -5326,74 +5408,25 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-int amdgpu_do_asic_reset(struct list_head *device_list_handle,
- struct amdgpu_reset_context *reset_context)
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
{
- struct amdgpu_device *tmp_adev = NULL;
- bool need_full_reset, skip_hw_reset, vram_lost = false;
- int r = 0;
-
- /* Try reset handler method first */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
-
- reset_context->reset_device_list = device_list_handle;
- r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
- /* If reset handler not implemented, continue; otherwise return */
- if (r == -EOPNOTSUPP)
- r = 0;
- else
- return r;
-
- /* Reset handler not implemented, use the default method */
- need_full_reset =
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
-
- /*
- * ASIC reset has to be done on all XGMI hive nodes ASAP
- * to allow proper links negotiation in FW (within 1 sec)
- */
- if (!skip_hw_reset && need_full_reset) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /* For XGMI run all resets in parallel to speed up the process */
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- tmp_adev->gmc.xgmi.pending_reset = false;
- if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
- r = -EALREADY;
- } else
- r = amdgpu_asic_reset(tmp_adev);
-
- if (r) {
- dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
- r, adev_to_drm(tmp_adev)->unique);
- goto out;
- }
- }
+ struct list_head *device_list_handle;
+ bool full_reset, vram_lost = false;
+ struct amdgpu_device *tmp_adev;
+ int r;
- /* For XGMI wait for all resets to complete before proceed */
- if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- flush_work(&tmp_adev->xgmi_reset_work);
- r = tmp_adev->asic_reset_res;
- if (r)
- break;
- }
- }
- }
- }
+ device_list_handle = reset_context->reset_device_list;
- if (!r && amdgpu_ras_intr_triggered()) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
- }
+ if (!device_list_handle)
+ return -EINVAL;
- amdgpu_ras_intr_cleared();
- }
+ full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ r = 0;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (need_full_reset) {
+ /* After reset, the device is back at the default init level */
+ amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT);
+ if (full_reset) {
/* post card */
amdgpu_ras_set_fed(tmp_adev, false);
r = amdgpu_device_asic_init(tmp_adev);
@@ -5483,7 +5516,6 @@ out:
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- need_full_reset = true;
r = -EAGAIN;
goto end;
}
@@ -5494,10 +5526,85 @@ out:
}
end:
- if (need_full_reset)
+ return r;
+}
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset, skip_hw_reset;
+ int r = 0;
+
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+
+ reset_context->reset_device_list = device_list_handle;
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -EOPNOTSUPP)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (!skip_hw_reset && need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ goto out;
+ }
+ }
+
+ /* For XGMI, wait for all resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ reset_list) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ amdgpu_ras_reset_error_count(tmp_adev,
+ AMDGPU_RAS_BLOCK__MMHUB);
+ }
+
+ amdgpu_ras_intr_cleared();
+ }
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r == -EAGAIN)
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
else
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+out:
return r;
}
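The large amdgpu_device.c refactor above splits amdgpu_do_asic_reset() into a reset pass and a separate amdgpu_device_reinit_after_reset() that walks the reset device list and restores the default init level. A compressed sketch of the two-phase flow, with simplified types standing in for amdgpu_reset_context:

#include <errno.h>
#include <stdio.h>

struct reset_context {
	int ndev;
	int *devices;        /* stand-in for the reset device list */
	int need_full_reset;
};

static int asic_reset(int dev)
{
	printf("dev%d: asic reset\n", dev);
	return 0;
}

/* Mirrors the new amdgpu_device_reinit_after_reset(): reinit is its own
 * pass over the device list, run after all resets have completed. */
static int reinit_after_reset(struct reset_context *ctx)
{
	if (!ctx->devices)
		return -EINVAL;
	for (int i = 0; i < ctx->ndev; i++)
		printf("dev%d: reinit at default init level\n", ctx->devices[i]);
	return 0;
}

static int do_asic_reset(struct reset_context *ctx)
{
	int r = 0;

	/* Phase 1: reset every device (in parallel for XGMI in the driver). */
	if (ctx->need_full_reset)
		for (int i = 0; i < ctx->ndev && !r; i++)
			r = asic_reset(ctx->devices[i]);

	/* Phase 2: bring the hardware back up. */
	return r ? r : reinit_after_reset(ctx);
}

int main(void)
{
	int devs[] = { 0, 1 };
	struct reset_context ctx = { 2, devs, 1 };

	return do_asic_reset(&ctx);
}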
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 4bd61c169ca8..73f4d56c5de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1723,37 +1723,75 @@ union nps_info {
struct nps_info_v1_0 v1;
};
+static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
+ union nps_info *nps_data)
+{
+ uint64_t vram_size, pos, offset;
+ struct nps_info_header *nhdr;
+ struct binary_header bhdr;
+ uint16_t checksum;
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
+
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+
+ amdgpu_device_vram_access(adev, (pos + offset), nps_data,
+ sizeof(*nps_data), false);
+
+ nhdr = (struct nps_info_header *)(nps_data);
+ if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
+ le32_to_cpu(nhdr->size_bytes),
+ checksum)) {
+ dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt)
+ int *range_cnt, bool refresh)
{
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
+ union nps_info nps_data;
u16 offset;
- int i;
+ int i, r;
if (!nps_type || !range_cnt || !ranges)
return -EINVAL;
- if (!adev->mman.discovery_bin) {
- dev_err(adev->dev,
- "fetch mem range failed, ip discovery uninitialized\n");
- return -EINVAL;
- }
+ if (refresh) {
+ r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
+ if (r)
+ return r;
+ nps_info = &nps_data;
+ } else {
+ if (!adev->mman.discovery_bin) {
+ dev_err(adev->dev,
+ "fetch mem range failed, ip discovery uninitialized\n");
+ return -EINVAL;
+ }
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
- if (!offset)
- return -ENOENT;
+ if (!offset)
+ return -ENOENT;
- /* If verification fails, return as if NPS table doesn't exist */
- if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
- return -ENOENT;
+ /* If verification fails, return as if NPS table doesn't exist */
+ if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ return -ENOENT;
- nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info =
+ (union nps_info *)(adev->mman.discovery_bin + offset);
+ }
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
@@ -2492,6 +2530,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
} else {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
@@ -2508,6 +2547,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
}
break;
case CHIP_VEGA20:
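amdgpu_discovery_refresh_nps_info() above re-reads the NPS table from VRAM and rejects it on a checksum mismatch rather than trusting the cached discovery binary. A toy illustration of that accept/reject step; the byte-sum checksum here is an assumption made for the sketch, the real amdgpu_discovery_verify_checksum() has its own algorithm:

#include <stdint.h>
#include <stdio.h>

/* Assumed checksum for illustration only: a plain byte sum. */
static uint16_t table_checksum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	uint8_t table[16] = "nps-info-sample";
	uint16_t expected = table_checksum(table, sizeof(table));

	/* A refresh re-reads the table from VRAM; a checksum mismatch
	 * means the copy is stale or corrupt and must be rejected. */
	if (table_checksum(table, sizeof(table)) != expected) {
		fprintf(stderr, "nps data refresh, checksum mismatch\n");
		return 1;
	}
	printf("nps table accepted\n");
	return 0;
}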
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index f5d36525ec3e..b44d56465c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt);
+ int *range_cnt, bool refresh);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7273c98c3963..6ac7d335e28e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -232,8 +232,6 @@ int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
-
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
@@ -248,9 +246,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
- .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
- mgpu_info.delayed_reset_work,
- amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
@@ -2439,6 +2434,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
amdgpu_xcp_dev_unplug(adev);
+ amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
@@ -2477,82 +2473,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
adev->mp1_state = PP_MP1_STATE_NONE;
}
-/**
- * amdgpu_drv_delayed_reset_work_handler - work handler for reset
- *
- * @work: work_struct.
- */
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
-{
- struct list_head device_list;
- struct amdgpu_device *adev;
- int i, r;
- struct amdgpu_reset_context reset_context;
-
- memset(&reset_context, 0, sizeof(reset_context));
-
- mutex_lock(&mgpu_info.mutex);
- if (mgpu_info.pending_reset == true) {
- mutex_unlock(&mgpu_info.mutex);
- return;
- }
- mgpu_info.pending_reset = true;
- mutex_unlock(&mgpu_info.mutex);
-
- /* Use a common context, just need to make sure full reset is done */
- reset_context.method = AMD_RESET_METHOD_NONE;
- set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- reset_context.reset_req_dev = adev;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- if (r) {
- dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev_to_drm(adev)->unique);
- }
- if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
- r = -EALREADY;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- flush_work(&adev->xgmi_reset_work);
- adev->gmc.xgmi.pending_reset = false;
- }
-
- /* reset function will rebuild the xgmi hive info , clear it now */
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
-
- INIT_LIST_HEAD(&device_list);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
-
- /* unregister the GPU first, reset function will add them back */
- list_for_each_entry(adev, &device_list, reset_list)
- amdgpu_unregister_gpu_instance(adev);
-
- /* Use a common context, just need to make sure full reset is done */
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
-
- if (r) {
- DRM_ERROR("reinit gpus failure");
- return;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete) {
- kgd2kfd_init_zone_device(adev);
- amdgpu_amdkfd_device_init(adev);
- amdgpu_amdkfd_drm_client_create(adev);
- }
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
- }
-}
-
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -3075,6 +2995,12 @@ static int __init amdgpu_init(void)
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
+ if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ pr_crit("Overdrive is enabled, please disable it before "
+ "reporting any bugs unrelated to overdrive.\n");
+ }
+
/* let modprobe override vga console setting */
return pci_register_driver(&amdgpu_kms_pci_driver);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index c7df7fa3459f..00a4ab082459 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -59,18 +59,21 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_mem_stats stats;
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };
ktime_t usage[AMDGPU_HW_IP_NUM];
- unsigned int hw_ip;
+ const char *pl_name[] = {
+ [TTM_PL_VRAM] = "vram",
+ [TTM_PL_TT] = "gtt",
+ [TTM_PL_SYSTEM] = "cpu",
+ };
+ unsigned int hw_ip, i;
int ret;
- memset(&stats, 0, sizeof(stats));
-
ret = amdgpu_bo_reserve(vm->root.bo, false);
if (ret)
return;
- amdgpu_vm_get_memory(vm, &stats);
+ amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
@@ -82,24 +85,35 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
*/
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
- drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
- drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
- drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+
+ for (i = 0; i < TTM_PL_PRIV; i++)
+ drm_print_memory_stats(p,
+ &stats[i].drm,
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ pl_name[i]);
+
+ /* Legacy amdgpu keys, alias to drm-resident-memory-: */
+ drm_printf(p, "drm-memory-vram:\t%llu KiB\n",
+ stats[TTM_PL_VRAM].total/1024UL);
+ drm_printf(p, "drm-memory-gtt: \t%llu KiB\n",
+ stats[TTM_PL_TT].total/1024UL);
+ drm_printf(p, "drm-memory-cpu: \t%llu KiB\n",
+ stats[TTM_PL_SYSTEM].total/1024UL);
+
+ /* Amdgpu specific memory accounting keys: */
drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n",
- stats.visible_vram/1024UL);
+ stats[TTM_PL_VRAM].visible/1024UL);
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
- stats.evicted_vram/1024UL);
+ stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n",
- stats.evicted_visible_vram/1024UL);
+ stats[TTM_PL_VRAM].evicted_visible/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
- stats.requested_vram/1024UL);
+ stats[TTM_PL_VRAM].requested/1024UL);
drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n",
- stats.requested_visible_vram/1024UL);
+ stats[TTM_PL_VRAM].requested_visible/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
- stats.requested_gtt/1024UL);
- drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
- drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
- drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+ stats[TTM_PL_TT].requested/1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
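The fdinfo rework replaces one flat stats struct with an array of per-placement stats indexed by TTM placement, so the legacy drm-memory-* keys reduce to array lookups. A sketch with simplified placement indices and made-up byte counts:

#include <stdint.h>
#include <stdio.h>

/* Simplified TTM placement indices; the driver indexes its stats array
 * the same way so one loop can emit a line per placement. */
enum { PL_SYSTEM, PL_TT, PL_VRAM, PL_NUM };

struct mem_stats {
	uint64_t total;   /* bytes */
};

int main(void)
{
	struct mem_stats stats[PL_NUM] = {
		[PL_VRAM]   = { 4096 * 1024 },
		[PL_TT]     = { 1024 * 1024 },
		[PL_SYSTEM] = {  256 * 1024 },
	};

	/* Legacy amdgpu fdinfo keys, one per placement, in KiB. */
	printf("drm-memory-vram:\t%llu KiB\n",
	       (unsigned long long)stats[PL_VRAM].total / 1024);
	printf("drm-memory-gtt: \t%llu KiB\n",
	       (unsigned long long)stats[PL_TT].total / 1024);
	printf("drm-memory-cpu: \t%llu KiB\n",
	       (unsigned long long)stats[PL_SYSTEM].total / 1024);
	return 0;
}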
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 83e54697f0ee..e96984c53e72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -87,16 +87,6 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
return bit;
}
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue)
-{
- *queue = bit % adev->gfx.me.num_queue_per_pipe;
- *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
- *me = (bit / adev->gfx.me.num_queue_per_pipe)
- / adev->gfx.me.num_pipe_per_me;
-}
-
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
int me, int pipe, int queue)
{
@@ -415,7 +405,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
/* prepare MQD backup */
- kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
+ kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
if (!kiq->mqd_backup) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n", ring->name);
@@ -438,7 +428,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.me.mqd_backup[i]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -462,7 +452,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[j]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -1363,35 +1353,35 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return count;
}
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
struct device_attribute *addr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- char *supported_partition;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
- /* TBD */
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- supported_partition = "SPX, DPX, QPX, CPX";
- break;
- case 6:
- supported_partition = "SPX, TPX, CPX";
- break;
- case 4:
- supported_partition = "SPX, DPX, CPX";
- break;
- /* this seems only existing in emulation phase */
- case 2:
- supported_partition = "SPX, CPX";
- break;
- default:
- supported_partition = "Not supported";
- break;
+ if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
}
- return sysfs_emit(buf, "%s\n", supported_partition);
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
}
static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
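The available_compute_partition show routine now walks avail_xcp_modes and joins the mode names with a separator that is empty before the first match and ", " afterwards. The same trick in a standalone sketch; the 4-XCC availability mask below is an assumption for illustration:

#include <stdio.h>

#define BIT(x) (1U << (x))

enum { SPX, DPX, TPX, QPX, CPX, MODE_NUM };

static const char * const xcp_desc[MODE_NUM] =
	{ "SPX", "DPX", "TPX", "QPX", "CPX" };

int main(void)
{
	/* Stand-in for xcp_mgr->avail_xcp_modes on a 4-XCC part. */
	unsigned int avail = BIT(SPX) | BIT(DPX) | BIT(CPX);
	const char *sep = "";

	/* Same sep-pointer trick as the sysfs show: no separator before
	 * the first item, ", " before every later one. */
	for (int mode = 0; mode < MODE_NUM; mode++) {
		if (!(avail & BIT(mode)))
			continue;
		printf("%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}
	printf("\n");   /* prints: SPX, DPX, CPX */
	return 0;
}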
@@ -1614,32 +1604,55 @@ static DEVICE_ATTR(available_compute_partition, 0444,
int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
int r;
+ if (!xcp_mgr)
+ return 0;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
+
+ if (!xcp_switch_supported)
+ dev_attr_current_compute_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+
r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
if (r)
return r;
- r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
+ if (xcp_switch_supported)
+ r = device_create_file(adev->dev,
+ &dev_attr_available_compute_partition);
return r;
}
void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
+
+ if (!xcp_mgr)
+ return;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
- device_remove_file(adev->dev, &dev_attr_available_compute_partition);
+
+ if (xcp_switch_supported)
+ device_remove_file(adev->dev,
+ &dev_attr_available_compute_partition);
}
int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_sriov_vf(adev)) {
- r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
- if (r)
- return r;
- }
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
if (r)
@@ -1650,8 +1663,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
- device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5644e10a86a9..f710178a21bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -540,8 +540,6 @@ bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 17a19d49d30a..1c19a65e6553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1065,18 +1065,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}
-/**
- * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
- * from CPU's view
- *
- * @adev: amdgpu_device pointer
- * @bo: amdgpu buffer object
- */
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
-{
- return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
-}
-
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
struct amdgpu_bo *vram_bo = NULL;
@@ -1130,6 +1118,79 @@ release_buffer:
return ret;
}
+static const char *nps_desc[] = {
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+static ssize_t available_memory_partition_show(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int size = 0, mode;
+ char *sep = "";
+
+ for_each_inst(mode, adev->gmc.supported_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t current_memory_partition_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_hive_info *hive;
+ int i;
+
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+ for_each_inst(i, adev->gmc.supported_nps_modes) {
+ if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
+ mode = i;
+ break;
+ }
+ }
+
+ if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
+ return -EINVAL;
+
+ if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
+ dev_info(
+ adev->dev,
+ "requested NPS mode is same as current NPS mode, skipping\n");
+ return count;
+ }
+
+ /* If the device is part of a hive, all devices in the hive should request
+ * the same mode. Hence store the requested mode in the hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ atomic_set(&hive->requested_nps_mode, mode);
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ adev->gmc.requested_nps_mode = mode;
+ }
+
+ dev_info(
+ adev->dev,
+ "NPS mode change requested, please remove and reload the driver\n");
+
+ return count;
+}
+
static ssize_t current_memory_partition_show(
struct device *dev, struct device_attribute *addr, char *buf)
{
@@ -1138,53 +1199,65 @@ static ssize_t current_memory_partition_show(
enum amdgpu_memory_partition mode;
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- switch (mode) {
- case AMDGPU_NPS1_PARTITION_MODE:
- return sysfs_emit(buf, "NPS1\n");
- case AMDGPU_NPS2_PARTITION_MODE:
- return sysfs_emit(buf, "NPS2\n");
- case AMDGPU_NPS3_PARTITION_MODE:
- return sysfs_emit(buf, "NPS3\n");
- case AMDGPU_NPS4_PARTITION_MODE:
- return sysfs_emit(buf, "NPS4\n");
- case AMDGPU_NPS6_PARTITION_MODE:
- return sysfs_emit(buf, "NPS6\n");
- case AMDGPU_NPS8_PARTITION_MODE:
- return sysfs_emit(buf, "NPS8\n");
- default:
+ if ((mode >= ARRAY_SIZE(nps_desc)) ||
+ (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
return sysfs_emit(buf, "UNKNOWN\n");
- }
+
+ return sysfs_emit(buf, "%s\n", nps_desc[mode]);
}
-static DEVICE_ATTR_RO(current_memory_partition);
+static DEVICE_ATTR_RW(current_memory_partition);
+static DEVICE_ATTR_RO(available_memory_partition);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
{
+ bool nps_switch_support;
+ int r = 0;
+
if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
return 0;
+ nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
+ AMDGPU_ALL_NPS_MASK) > 1);
+ if (!nps_switch_support)
+ dev_attr_current_memory_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+ else
+ r = device_create_file(adev->dev,
+ &dev_attr_available_memory_partition);
+
+ if (r)
+ return r;
+
return device_create_file(adev->dev,
&dev_attr_current_memory_partition);
}
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
{
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return;
+
device_remove_file(adev->dev, &dev_attr_current_memory_partition);
+ device_remove_file(adev->dev, &dev_attr_available_memory_partition);
}
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges)
+ uint8_t *exp_ranges)
{
struct amdgpu_gmc_memrange *ranges;
int range_cnt, ret, i, j;
uint32_t nps_type;
+ bool refresh;
- if (!mem_ranges)
+ if (!mem_ranges || !exp_ranges)
return -EINVAL;
+ refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
+ (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
- &range_cnt);
+ &range_cnt, refresh);
if (ret)
return ret;
@@ -1192,16 +1265,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
/* TODO: For now, expect ranges and partition count to be the same.
* Adjust if there are holes expected in any NPS domain.
*/
- if (range_cnt != exp_ranges) {
+ if (*exp_ranges && (range_cnt != *exp_ranges)) {
dev_warn(
adev->dev,
"NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
- exp_ranges, nps_type, range_cnt);
+ *exp_ranges, nps_type, range_cnt);
ret = -EINVAL;
goto err;
}
- for (i = 0; i < exp_ranges; ++i) {
+ for (i = 0; i < range_cnt; ++i) {
if (ranges[i].base_address >= ranges[i].limit_address) {
dev_warn(
adev->dev,
@@ -1242,8 +1315,81 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
ranges[i].limit_address - ranges[i].base_address + 1;
}
+ if (!*exp_ranges)
+ *exp_ranges = range_cnt;
err:
kfree(ranges);
return ret;
}
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode)
+{
+ /* Not supported on VF devices and APUs */
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return -EOPNOTSUPP;
+
+ if (!adev->psp.funcs) {
+ dev_err(adev->dev,
+ "PSP interface not available for nps mode change request");
+ return -EINVAL;
+ }
+
+ return psp_memory_partition(&adev->psp, nps_mode);
+}
+
+static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
+ int req_nps_mode,
+ int cur_nps_mode)
+{
+ return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
+ BIT(req_nps_mode)) &&
+ req_nps_mode != cur_nps_mode);
+}
+
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
+{
+ int req_nps_mode, cur_nps_mode, r;
+ struct amdgpu_hive_info *hive;
+
+ if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
+ !adev->gmc.gmc_funcs->request_mem_partition_mode)
+ return;
+
+ cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ req_nps_mode = atomic_read(&hive->requested_nps_mode);
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
+ cur_nps_mode)) {
+ amdgpu_put_xgmi_hive(hive);
+ return;
+ }
+ r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
+ amdgpu_put_xgmi_hive(hive);
+ goto out;
+ }
+
+ req_nps_mode = adev->gmc.requested_nps_mode;
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
+ return;
+
+ /* even if this fails, we should let the driver unload without blocking */
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
+out:
+ if (r)
+ dev_err(adev->dev, "NPS mode change request failed\n");
+ else
+ dev_info(
+ adev->dev,
+ "NPS mode change request done, reload driver to complete the change\n");
+}
+
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->gmc.gmc_funcs->need_reset_on_init)
+ return adev->gmc.gmc_funcs->need_reset_on_init(adev);
+
+ return false;
+}
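The reworked amdgpu_gmc_get_nps_memranges() above turns exp_ranges into an in/out parameter: a caller passing 0 gets the discovered range count written back, while a non-zero value is validated against discovery data. A minimal sketch of the calling convention (the local names and the AMDGPU_MAX_MEM_RANGES bound are illustrative assumptions):

    struct amdgpu_mem_partition_info ranges[AMDGPU_MAX_MEM_RANGES];
    uint8_t nranges = 0;    /* 0 => report how many ranges exist */
    int r;

    r = amdgpu_gmc_get_nps_memranges(adev, ranges, &nranges);
    if (!r)
            /* nranges now holds the discovered count; passing the same
             * non-zero value later makes the function validate it. */
            r = amdgpu_gmc_get_nps_memranges(adev, ranges, &nranges);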
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 4d951a1baefa..459a30fe239f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -73,6 +73,13 @@ enum amdgpu_memory_partition {
AMDGPU_NPS8_PARTITION_MODE = 8,
};
+#define AMDGPU_ALL_NPS_MASK \
+ (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE))
+
+#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+
/*
* GMC page fault information
*/
@@ -161,6 +168,10 @@ struct amdgpu_gmc_funcs {
enum amdgpu_memory_partition (*query_mem_partition_mode)(
struct amdgpu_device *adev);
+ /* Request NPS mode */
+ int (*request_mem_partition_mode)(struct amdgpu_device *adev,
+ int nps_mode);
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
struct amdgpu_xgmi_ras {
@@ -182,7 +193,6 @@ struct amdgpu_xgmi {
bool supported;
struct ras_common_if *ras_if;
bool connected_to_cpu;
- bool pending_reset;
struct amdgpu_xgmi_ras *ras;
};
@@ -305,6 +315,9 @@ struct amdgpu_gmc {
struct amdgpu_mem_partition_info *mem_partitions;
uint8_t num_mem_partitions;
const struct amdgpu_gmc_funcs *gmc_funcs;
+ enum amdgpu_memory_partition requested_nps_mode;
+ uint32_t supported_nps_modes;
+ uint32_t reset_flags;
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
@@ -447,13 +460,17 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev);
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev);
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges);
+ uint8_t *exp_ranges);
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode);
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
#endif
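Both supported_nps_modes and AMDGPU_ALL_NPS_MASK are bitmasks keyed by the enum values, so mode queries reduce to bit tests. A short standalone illustration with assumed values, mirroring the checks in amdgpu_gmc_need_nps_switch_req() and amdgpu_gmc_sysfs_init():

    uint32_t supported = BIT(AMDGPU_NPS1_PARTITION_MODE) |
                         BIT(AMDGPU_NPS4_PARTITION_MODE);

    /* Membership test: is NPS4 one of the supported modes? */
    bool nps4_ok = (BIT(AMDGPU_NPS4_PARTITION_MODE) & supported) != 0;

    /* More than one supported mode => the sysfs node stays writable. */
    bool switchable = hweight32(supported & AMDGPU_ALL_NPS_MASK) > 1;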
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 00d6211e0fbf..f0765ccde668 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -225,15 +225,6 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
kfree(i2c);
}
-/* Add the default buses */
-void amdgpu_i2c_init(struct amdgpu_device *adev)
-{
- if (amdgpu_hw_i2c)
- DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
-
- amdgpu_atombios_i2c_init(adev);
-}
-
/* remove all the buses */
void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
@@ -247,22 +238,6 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
}
}
-/* Add additional buses */
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name)
-{
- struct drm_device *dev = adev_to_drm(adev);
- int i;
-
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (!adev->i2c_bus[i]) {
- adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
- return;
- }
- }
-}
-
/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 63c2ff7499e1..21e3d1dad0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -28,11 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
-void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *i2c_bus);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 92d27d32de41..8e712a11aba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -342,15 +342,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* @ring: ring we want to submit job to
* @job: job who wants to use the VMID
* @id: resulting VMID
- * @fence: fence to wait for if no id could be grabbed
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_job *job,
- struct amdgpu_vmid **id,
- struct dma_fence **fence)
+ struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
@@ -429,7 +427,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !id)
goto error;
} else {
- r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 4766e99dd98f..263ce1811cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,33 +33,17 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
-static int isp_sw_init(void *handle)
-{
- return 0;
-}
-
-static int isp_sw_fini(void *handle)
-{
- return 0;
-}
-
/**
* isp_hw_init - start and test isp block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_init(void *handle)
+static int isp_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP);
-
- if (!ip_block)
- return -EINVAL;
-
if (isp->funcs->hw_init != NULL)
return isp->funcs->hw_init(isp);
@@ -69,13 +53,12 @@ static int isp_hw_init(void *handle)
/**
* isp_hw_fini - stop the hardware block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_fini(void *handle)
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_isp *isp = &adev->isp;
+ struct amdgpu_isp *isp = &ip_block->adev->isp;
if (isp->funcs->hw_fini != NULL)
return isp->funcs->hw_fini(isp);
@@ -83,16 +66,6 @@ static int isp_hw_fini(void *handle)
return -ENODEV;
}
-static int isp_suspend(void *handle)
-{
- return 0;
-}
-
-static int isp_resume(void *handle)
-{
- return 0;
-}
-
static int isp_load_fw_by_psp(struct amdgpu_device *adev)
{
const struct common_firmware_header *hdr;
@@ -122,9 +95,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
return r;
}
-static int isp_early_init(void *handle)
+static int isp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
@@ -154,16 +128,6 @@ static bool isp_is_idle(void *handle)
return true;
}
-static int isp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int isp_soft_reset(void *handle)
-{
- return 0;
-}
-
static int isp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -179,16 +143,9 @@ static int isp_set_powergating_state(void *handle,
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
- .late_init = NULL,
- .sw_init = isp_sw_init,
- .sw_fini = isp_sw_fini,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
- .suspend = isp_suspend,
- .resume = isp_resume,
.is_idle = isp_is_idle,
- .wait_for_idle = isp_wait_for_idle,
- .soft_reset = isp_soft_reset,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index fffa4430784f..ea2663169bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->funcs->dump_ip_state)
adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)adev);
+ ->dump_ip_state((void *)&adev->ip_blocks[i]);
dev_info(adev->dev, "Dumping IP State Completed\n");
amdgpu_coredump(adev, true, false, job);
@@ -356,10 +356,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
if (r)
goto error;
- if (!fence && job->gang_submit)
+ if (job->gang_submit)
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
- while (!fence && job->vm && !job->vmid) {
+ if (!fence && job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6df99cb00d9a..95e2796919fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -342,3 +342,76 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
+/*
+ * debugfs to enable/disable jpeg job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1 << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ }
+ /* publish the sched.ready flag update so it takes effect across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->sched.ready)
+ mask |= 1 << ((i * adev->jpeg.num_jpeg_rings) + j);
+ }
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops,
+ amdgpu_debugfs_jpeg_sched_mask_get,
+ amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ if (adev->jpeg.num_jpeg_inst <= 1 && adev->jpeg.num_jpeg_rings <= 1)
+ return;
+ debugfs_create_file("amdgpu_jpeg_sched_mask", 0600, root, adev,
+ &amdgpu_debugfs_jpeg_sched_mask_fops);
+#endif
+}
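The mask packs ring (i, j) into bit i * num_jpeg_rings + j. For instance, on a hypothetical part with two instances of two rings each, scheduling only instance 1 means setting bits 2 and 3:

    /* Illustrative values only: 2 instances x 2 rings. */
    u32 num_rings = 2;
    u64 inst1_only = 0;
    u32 j;

    for (j = 0; j < num_rings; j++)
            inst1_only |= 1ULL << (1 * num_rings + j);
    /* inst1_only == 0xc; write that value to amdgpu_jpeg_sched_mask. */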
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index f9cdd873ac9b..819dc7a0af99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -149,5 +149,6 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 10b61ff63802..6909af56fcad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -905,7 +905,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.me_id = ring->me;
queue_input.pipe_id = ring->pipe;
queue_input.queue_id = ring->queue;
- queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
@@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r)
+ if (r) {
+ amdgpu_mes_unlock(&adev->mes);
goto clean_up_memory;
+ }
amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1237,7 +1239,6 @@ clean_up_ring:
amdgpu_ring_fini(ring);
clean_up_memory:
kfree(ring);
- amdgpu_mes_unlock(&adev->mes);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index f61d117b0caf..79c2f807b9fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs {
int (*get_compute_partition_mode)(struct amdgpu_device *adev);
u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
u32 *supp_modes);
+ bool (*is_nps_switch_requested)(struct amdgpu_device *adev);
u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
void (*set_reg_remap)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 44819cdba7fb..1e6a044e3143 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1170,54 +1170,92 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats)
+ struct amdgpu_mem_stats *stats,
+ unsigned int sz)
{
+ const unsigned int domain_to_pl[] = {
+ [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM,
+ [ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT,
+ [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM,
+ [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS,
+ [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS,
+ [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA,
+ [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
+ };
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
+ struct drm_gem_object *obj = &bo->tbo.base;
uint64_t size = amdgpu_bo_size(bo);
- struct drm_gem_object *obj;
- bool shared;
-
- /* Abort if the BO doesn't currently have a backing store */
- if (!res)
- return;
+ unsigned int type;
- obj = &bo->tbo.base;
- shared = drm_gem_object_is_shared_for_memory_stats(obj);
+ if (!res) {
+ /*
+ * If there is no backing store, use one of the preferred domains
+ * for basic stats. We take the MSB since that should give a
+ * reasonable view.
+ */
+ BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
+ TTM_PL_VRAM < TTM_PL_SYSTEM);
+ type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
+ if (!type)
+ return;
+ type--;
+ if (drm_WARN_ON_ONCE(&adev->ddev,
+ type >= ARRAY_SIZE(domain_to_pl)))
+ return;
+ type = domain_to_pl[type];
+ } else {
+ type = res->mem_type;
+ }
- switch (res->mem_type) {
+ /* Squash some into 'cpu' to keep the legacy userspace view. */
+ switch (type) {
case TTM_PL_VRAM:
- stats->vram += size;
- if (amdgpu_res_cpu_visible(adev, res))
- stats->visible_vram += size;
- if (shared)
- stats->vram_shared += size;
- break;
case TTM_PL_TT:
- stats->gtt += size;
- if (shared)
- stats->gtt_shared += size;
- break;
case TTM_PL_SYSTEM:
+ break;
default:
- stats->cpu += size;
- if (shared)
- stats->cpu_shared += size;
+ type = TTM_PL_SYSTEM;
break;
}
+ if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
+ return;
+
+ /* DRM stats common fields: */
+
+ stats[type].total += size;
+ if (drm_gem_object_is_shared_for_memory_stats(obj))
+ stats[type].drm.shared += size;
+ else
+ stats[type].drm.private += size;
+
+ if (res) {
+ stats[type].drm.resident += size;
+
+ if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
+ stats[type].drm.active += size;
+ else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ stats[type].drm.purgeable += size;
+
+ if (type == TTM_PL_VRAM && amdgpu_res_cpu_visible(adev, res))
+ stats[type].visible += size;
+ }
+
+ /* amdgpu specific stats: */
+
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
- stats->requested_vram += size;
+ stats[TTM_PL_VRAM].requested += size;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->requested_visible_vram += size;
+ stats[TTM_PL_VRAM].requested_visible += size;
- if (res->mem_type != TTM_PL_VRAM) {
- stats->evicted_vram += size;
+ if (type != TTM_PL_VRAM) {
+ stats[TTM_PL_VRAM].evicted += size;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->evicted_visible_vram += size;
+ stats[TTM_PL_VRAM].evicted_visible += size;
}
} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
- stats->requested_gtt += size;
+ stats[TTM_PL_TT].requested += size;
}
}
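When a BO has no backing store, its highest preferred domain is translated to a TTM placement through domain_to_pl[]. Since the AMDGPU_GEM_DOMAIN_* flags are single bits, fls() selects the MSB and the ilog2()-keyed table maps it; a worked instance, assuming the usual domain bit values:

    /* AMDGPU_GEM_DOMAIN_CPU = 0x1, _GTT = 0x2, _VRAM = 0x4, ... */
    uint32_t preferred = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;

    /* fls(0x6) == 3; minus one gives bit index 2 == ilog2(VRAM). */
    unsigned int type = fls(preferred & AMDGPU_GEM_DOMAIN_MASK) - 1;
    /* domain_to_pl[2] == TTM_PL_VRAM, so the BO is counted as VRAM. */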
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 717e47b46167..7260349917ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -140,30 +140,14 @@ struct amdgpu_bo_vm {
};
struct amdgpu_mem_stats {
- /* current VRAM usage, includes visible VRAM */
- uint64_t vram;
- /* current shared VRAM usage, includes visible VRAM */
- uint64_t vram_shared;
- /* current visible VRAM usage */
- uint64_t visible_vram;
- /* current GTT usage */
- uint64_t gtt;
- /* current shared GTT usage */
- uint64_t gtt_shared;
- /* current system memory usage */
- uint64_t cpu;
- /* current shared system memory usage */
- uint64_t cpu_shared;
- /* sum of evicted buffers, includes visible VRAM */
- uint64_t evicted_vram;
- /* sum of evicted buffers due to CPU access */
- uint64_t evicted_visible_vram;
- /* how much userspace asked for, includes vis.VRAM */
- uint64_t requested_vram;
- /* how much userspace asked for */
- uint64_t requested_visible_vram;
- /* how much userspace asked for */
- uint64_t requested_gtt;
+ struct drm_memory_stats drm;
+
+ uint64_t total;
+ uint64_t visible;
+ uint64_t evicted;
+ uint64_t evicted_visible;
+ uint64_t requested;
+ uint64_t requested_visible;
};
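Callers are now expected to allocate one amdgpu_mem_stats slot per TTM placement and pass the array length; a sketch of the fdinfo-style usage, assuming __AMDGPU_PL_LAST from amdgpu_ttm.h bounds the placements:

    struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };

    amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
    /* e.g. stats[TTM_PL_VRAM].total, stats[TTM_PL_TT].drm.resident */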
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
@@ -328,7 +312,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats);
+ struct amdgpu_mem_stats *stats,
+ unsigned int size);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0b28b2cf1517..abd5e980c9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -159,9 +159,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
return ret;
}
-static int psp_early_init(void *handle)
+static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
psp->autoload_supported = true;
@@ -421,9 +421,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_sw_init(void *handle)
+static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
int ret;
struct psp_runtime_boot_cfg_entry boot_cfg_entry;
@@ -527,9 +527,9 @@ failed1:
return ret;
}
-static int psp_sw_fini(void *handle)
+static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
@@ -639,6 +639,8 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "AUTOLOAD_RLC";
case GFX_CMD_ID_BOOT_CFG:
return "BOOT_CFG";
+ case GFX_CMD_ID_CONFIG_SQ_PERFMON:
+ return "CONFIG_SQ_PERFMON";
default:
return "UNKNOWN CMD";
}
@@ -1043,6 +1045,31 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
+int psp_memory_partition(struct psp_context *psp, int mode)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
+ cmd->cmd.cmd_memory_part.mode = mode;
+
+ dev_info(psp->adev->dev,
+ "Requesting %d memory partition change through PSP", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "PSP request failed to change to NPS%d mode\n", mode);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
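psp_memory_partition() wraps a single GFX_CMD_ID_FB_NPS_MODE submission; drivers reach it through amdgpu_gmc_request_memory_partition() from amdgpu_gmc.c above. A hedged usage sketch:

    /* Ask the PSP for NPS4; the switch completes on the next driver reload. */
    int r = amdgpu_gmc_request_memory_partition(adev,
                                                AMDGPU_NPS4_PARTITION_MODE);
    if (r)
            dev_warn(adev->dev, "NPS4 request rejected: %d\n", r);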
+
int psp_spatial_partition(struct psp_context *psp, int mode)
{
struct psp_gfx_cmd_resp *cmd;
@@ -2264,6 +2291,19 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
}
}
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return false;
+
+ if (psp->funcs && psp->funcs->is_reload_needed)
+ return psp->funcs->is_reload_needed(psp);
+
+ return false;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -2958,10 +2998,10 @@ failed:
return ret;
}
-static int psp_hw_init(void *handle)
+static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
/*
@@ -2987,9 +3027,9 @@ failed:
return -EINVAL;
}
-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (psp->ta_fw) {
@@ -3011,10 +3051,10 @@ static int psp_hw_fini(void *handle)
return 0;
}
-static int psp_suspend(void *handle)
+static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
@@ -3074,10 +3114,10 @@ out:
return ret;
}
-static int psp_resume(void *handle)
+static int psp_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
dev_info(adev->dev, "PSP is resuming...\n");
@@ -3736,8 +3776,44 @@ out:
return err;
}
+int psp_config_sq_perfmon(struct psp_context *psp,
+ uint32_t xcp_id, bool core_override_enable,
+ bool reg_override_enable, bool perfmon_override_enable)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (xcp_id > MAX_XCP) {
+ dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
+ dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
+ cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
+ cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
+ cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
+ cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
+ xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
+
+ release_psp_cmd_buf(psp);
+ return ret;
+}
+
static int psp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+ enum amd_clockgating_state state)
{
return 0;
}
@@ -4019,17 +4095,12 @@ const struct attribute_group amdgpu_flash_attr_group = {
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
- .late_init = NULL,
.sw_init = psp_sw_init,
.sw_fini = psp_sw_fini,
.hw_init = psp_hw_init,
.hw_fini = psp_hw_fini,
.suspend = psp_suspend,
.resume = psp_resume,
- .is_idle = NULL,
- .check_soft_reset = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e8abbbcb4326..567cb1f924ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -139,6 +139,7 @@ struct psp_funcs {
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
+ bool (*is_reload_needed)(struct psp_context *psp);
};
struct ta_funcs {
@@ -552,9 +553,15 @@ int psp_load_fw_list(struct psp_context *psp,
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
int psp_spatial_partition(struct psp_context *psp, int mode);
+int psp_memory_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
+
+int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a1395c5fff1..1d9eda883bb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -3146,7 +3146,42 @@ static int amdgpu_ras_page_retirement_thread(void *param)
return 0;
}
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ if (!con || amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ return ret;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev))
+ return -EHWPOISON;
+
+ if (con->eeprom_control.ras_num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ return ret;
+
+ amdgpu_dpm_send_hbm_bad_pages_num(
+ adev, con->eeprom_control.ras_num_recs);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(
+ adev, con->eeprom_control.bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
+ }
+
+ return ret;
+}
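Splitting amdgpu_ras_init_badpage_info() out lets callers defer the EEPROM access when a reset-on-init is pending and run it once the reset completes. A plausible caller, sketched with a hypothetical reset_pending flag standing in for the driver's real condition:

    /* Early init: skip EEPROM while an XGMI/NPS reset is still pending. */
    r = amdgpu_ras_recovery_init(adev, !reset_pending);
    if (r)
            return r;

    /* ... after the reset-on-init has completed ... */
    if (reset_pending)
            r = amdgpu_ras_init_badpage_info(adev);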
+
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
@@ -3181,31 +3216,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
- /* Todo: During test the SMU might fail to read the eeprom through I2C
- * when the GPU is pending on XGMI reset during probe time
- * (Mostly after second bus reset), skip it now
- */
- if (adev->gmc.xgmi.pending_reset)
- return 0;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
- /*
- * This calling fails when is_rma is true or
- * ret != 0.
- */
- if (amdgpu_ras_is_rma(adev) || ret)
- goto free;
-
- if (con->eeprom_control.ras_num_recs) {
- ret = amdgpu_ras_load_bad_pages(adev);
+ if (init_bp_info) {
+ ret = amdgpu_ras_init_badpage_info(adev);
if (ret)
goto free;
-
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
-
- if (con->update_channel_flag == true) {
- amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
- con->update_channel_flag = false;
- }
}
mutex_init(&con->page_rsv_lock);
@@ -4294,8 +4308,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
- if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ /* In the case of multiple GPUs, once one GPU has started
+ * resetting all GPUs on the hive, the other GPUs do not need
+ * to trigger a GPU reset again.
+ */
+ if (!hive_ras_recovery)
+ amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ else
+ atomic_set(&ras->in_recovery, 0);
+ } else {
+ flush_work(&ras->recovery_work);
amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 669720a9c60a..871b2d6278e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -736,8 +736,8 @@ struct amdgpu_ras_block_hw_ops {
* 8: feature disable
*/
-
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev);
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 66c1a868c0e1..24dae7cdbe95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -26,6 +26,156 @@
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"
+static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+
+ /* XXX handle errors */
+ amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ /* The VCN FW shared region is in the framebuffer; some flags are
+ * initialized in that region during sw_init. Make sure the region is
+ * backed up.
+ */
+ amdgpu_vcn_save_vcpu_bo(adev);
+
+ return 0;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev;
+ int r = 0;
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_unregister_gpu_instance(tmp_adev);
+ r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: prepare for reset failed");
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r)
+ return r;
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ if (!tmp_adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(tmp_adev);
+ amdgpu_amdkfd_device_init(tmp_adev);
+ amdgpu_amdkfd_drm_client_create(tmp_adev);
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ dev_dbg(adev->dev, "xgmi roi - hw reset\n");
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset =
+ amdgpu_asic_reset_method(adev);
+ }
+ r = 0;
+ /* Mode1 reset needs to be triggered on all devices together */
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ /* For XGMI, run all resets in parallel to speed up the process */
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: reset failed with error, %d",
+ r);
+ break;
+ }
+ }
+
+ /* For XGMI, wait for all resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *adev;
+ int r;
+
+ if (!reset_device_list || list_empty(reset_device_list) ||
+ list_is_singular(reset_device_list))
+ return -EINVAL;
+
+ adev = list_first_entry(reset_device_list, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ if (r)
+ return r;
+
+ r = amdgpu_reset_perform_reset(adev, reset_context);
+
+ return r;
+}
+
+struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
+ .reset_method = AMD_RESET_METHOD_ON_INIT,
+ .prepare_env = NULL,
+ .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
+ .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
+ .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
+ .restore_env = NULL,
+ .do_reset = NULL,
+};
+
int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 1cb920abc2fe..f8628bc898df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -153,4 +153,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
(handler = (*reset_ctl->reset_handlers)[i]); \
++i)
+
+extern struct amdgpu_reset_handler xgmi_reset_on_init_handler;
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 690976665cf6..42f616c05f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -108,10 +108,26 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
*/
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- int i;
+ uint32_t occupied, chunk1, chunk2;
+ uint32_t *dst;
- for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ occupied = ring->wptr & ring->buf_mask;
+ dst = (void *)&ring->ring[occupied];
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count) ? count : chunk1;
+ chunk2 = count - chunk1;
+
+ if (chunk1)
+ memset32(dst, ring->funcs->nop, chunk1);
+
+ if (chunk2) {
+ dst = (void *)ring->ring;
+ memset32(dst, ring->funcs->nop, chunk2);
+ }
+
+ ring->wptr += count;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count;
}
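The rewrite replaces count single-dword writes with at most two memset32() calls split at the wrap point. Worked through with assumed toy values:

    /* 256-dword ring => buf_mask = 0xff; say wptr = 250 and count = 10. */
    uint32_t occupied = 250 & 0xff;          /* 250 */
    uint32_t chunk1 = 0xff + 1 - occupied;   /* 6 dwords up to the wrap */

    chunk1 = (chunk1 >= 10) ? 10 : chunk1;   /* still 6 */
    /* chunk2 = 10 - 6 = 4 dwords written from the start of the ring. */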
/**
@@ -141,6 +157,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
uint32_t count;
+ if (ring->count_dw < 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f93f51002201..574336d6714a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -377,8 +377,6 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
@@ -390,9 +388,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
unsigned occupied, chunk1, chunk2;
void *dst;
- if (unlikely(ring->count_dw < count_dw))
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-
occupied = ring->wptr & ring->buf_mask;
dst = (void *)&ring->ring[occupied];
chunk1 = ring->buf_mask + 1 - occupied;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 74adb983ab03..0637414fc70e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (r)
- goto release_sg;
+ goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
@@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
return 0;
+release_sg_table:
+ sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
ttm->sg = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 138d80017f35..2852a6064c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,6 +34,7 @@
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
+#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 6162582d0aa2..bd2d3863c3ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -765,9 +765,9 @@ static int umsch_mm_init(struct amdgpu_device *adev)
}
-static int umsch_mm_early_init(void *handle)
+static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
@@ -784,9 +784,9 @@ static int umsch_mm_early_init(void *handle)
return 0;
}
-static int umsch_mm_late_init(void *handle)
+static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
@@ -794,9 +794,9 @@ static int umsch_mm_late_init(void *handle)
return umsch_mm_test(adev);
}
-static int umsch_mm_sw_init(void *handle)
+static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_init(adev);
@@ -815,9 +815,9 @@ static int umsch_mm_sw_init(void *handle)
return 0;
}
-static int umsch_mm_sw_fini(void *handle)
+static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -839,9 +839,9 @@ static int umsch_mm_sw_fini(void *handle)
return 0;
}
-static int umsch_mm_hw_init(void *handle)
+static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_load_microcode(&adev->umsch_mm);
@@ -857,9 +857,9 @@ static int umsch_mm_hw_init(void *handle)
return 0;
}
-static int umsch_mm_hw_fini(void *handle)
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
umsch_mm_ring_stop(&adev->umsch_mm);
@@ -873,18 +873,14 @@ static int umsch_mm_hw_fini(void *handle)
return 0;
}
-static int umsch_mm_suspend(void *handle)
+static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_fini(adev);
+ return umsch_mm_hw_fini(ip_block);
}
-static int umsch_mm_resume(void *handle)
+static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_init(adev);
+ return umsch_mm_hw_init(ip_block);
}
void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm)
@@ -997,8 +993,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
.hw_fini = umsch_mm_hw_fini,
.suspend = umsch_mm_suspend,
.resume = umsch_mm_resume,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 43f44cc201cb..aecb78e0519f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -294,21 +294,12 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
unsigned int size;
void *ptr;
int i, idx;
- bool in_ras_intr = amdgpu_ras_intr_triggered();
-
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
- /* err_event_athub will corrupt VCPU buffer, so we need to
- * restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
- return 0;
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -327,9 +318,24 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
}
+
return 0;
}
+int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+{
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ /* err_event_athub will corrupt VCPU buffer, so we need to
+ * restore fw data and clear buffer in amdgpu_vcn_resume() */
+ if (in_ras_intr)
+ return 0;
+
+ return amdgpu_vcn_save_vcpu_bo(adev);
+}
+
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
unsigned int size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 2a1f3dbb14d3..765b809d48a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -518,5 +518,6 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d4c2afafbb73..8bf28d336807 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -493,10 +493,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int amdgpu_vkms_sw_init(void *handle)
+static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
@@ -536,9 +536,9 @@ static int amdgpu_vkms_sw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_sw_fini(void *handle)
+static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
@@ -555,9 +555,9 @@ static int amdgpu_vkms_sw_fini(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_init(void *handle)
+static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -600,31 +600,31 @@ static int amdgpu_vkms_hw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_fini(void *handle)
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int amdgpu_vkms_suspend(void *handle)
+static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = drm_mode_config_helper_suspend(adev_to_drm(adev));
if (r)
return r;
- return amdgpu_vkms_hw_fini(handle);
+
+ return 0;
}
-static int amdgpu_vkms_resume(void *handle)
+static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_vkms_hw_init(handle);
+ r = amdgpu_vkms_hw_init(ip_block);
if (r)
return r;
- return drm_mode_config_helper_resume(adev_to_drm(adev));
+ return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
static bool amdgpu_vkms_is_idle(void *handle)
@@ -632,16 +632,6 @@ static bool amdgpu_vkms_is_idle(void *handle)
return true;
}
-static int amdgpu_vkms_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int amdgpu_vkms_soft_reset(void *handle)
-{
- return 0;
-}
-
static int amdgpu_vkms_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -656,8 +646,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle,
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.name = "amdgpu_vkms",
- .early_init = NULL,
- .late_init = NULL,
.sw_init = amdgpu_vkms_sw_init,
.sw_fini = amdgpu_vkms_sw_fini,
.hw_init = amdgpu_vkms_hw_init,
@@ -665,12 +653,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.suspend = amdgpu_vkms_suspend,
.resume = amdgpu_vkms_resume,
.is_idle = amdgpu_vkms_is_idle,
- .wait_for_idle = amdgpu_vkms_wait_for_idle,
- .soft_reset = amdgpu_vkms_soft_reset,
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
.set_powergating_state = amdgpu_vkms_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6005280f5f38..6b855810ee86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1083,7 +1083,8 @@ error_free:
}
static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
- struct amdgpu_mem_stats *stats)
+ struct amdgpu_mem_stats *stats,
+ unsigned int size)
{
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1099,34 +1100,35 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
!dma_resv_trylock(bo->tbo.base.resv))
return;
- amdgpu_bo_get_memory(bo, stats);
+ amdgpu_bo_get_memory(bo, stats, size);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
dma_resv_unlock(bo->tbo.base.resv);
}
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats)
+ struct amdgpu_mem_stats *stats,
+ unsigned int size)
{
struct amdgpu_bo_va *bo_va, *tmp;
spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ amdgpu_vm_bo_get_memory(bo_va, stats, size);
spin_unlock(&vm->status_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 52dd7cdfdc81..c5b41e3ed14f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -567,7 +567,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats);
+ struct amdgpu_mem_stats *stats,
+ unsigned int size);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 5acd20ff5979..46713a158d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -295,9 +295,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_early_init(void *handle)
+static int vpe_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
@@ -356,9 +356,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_sw_init(void *handle)
+static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
@@ -381,9 +381,9 @@ out:
return ret;
}
-static int vpe_sw_fini(void *handle)
+static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
release_firmware(vpe->fw);
@@ -398,9 +398,9 @@ static int vpe_sw_fini(void *handle)
return 0;
}
-static int vpe_hw_init(void *handle)
+static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
@@ -421,9 +421,9 @@ static int vpe_hw_init(void *handle)
return 0;
}
-static int vpe_hw_fini(void *handle)
+static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
vpe_ring_stop(vpe);
@@ -434,20 +434,18 @@ static int vpe_hw_fini(void *handle)
return 0;
}
-static int vpe_suspend(void *handle)
+static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vpe.idle_work);
- return vpe_hw_fini(adev);
+ return vpe_hw_fini(ip_block);
}
-static int vpe_resume(void *handle)
+static int vpe_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vpe_hw_init(adev);
+ return vpe_hw_init(ip_block);
}
static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -908,14 +906,12 @@ static void vpe_set_ring_funcs(struct amdgpu_device *adev)
const struct amd_ip_funcs vpe_ip_funcs = {
.name = "vpe_v6_1",
.early_init = vpe_early_init,
- .late_init = NULL,
.sw_init = vpe_sw_init,
.sw_fini = vpe_sw_fini,
.hw_init = vpe_hw_init,
.hw_fini = vpe_hw_fini,
.suspend = vpe_suspend,
.resume = vpe_resume,
- .soft_reset = NULL,
.set_clockgating_state = vpe_set_clockgating_state,
.set_powergating_state = vpe_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index a6d456ec6aeb..83a16918ea76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -433,3 +433,246 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
+ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
+ struct amdgpu_xcp_res_details *xcp_res, char *buf) \
+ { \
+ return sysfs_emit(buf, "%d\n", xcp_res->_name); \
+ }
+
+struct amdgpu_xcp_res_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
+};
+
+#define XCP_CFG_SYSFS_RES_ATTR(_name) \
+ struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .show = amdgpu_xcp_res_sysfs_##_name##_show, \
+ }
+
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
+XCP_CFG_SYSFS_RES_ATTR(num_inst);
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
+XCP_CFG_SYSFS_RES_ATTR(num_shared);
+
+#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr
+
+static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
+};
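For reference, the two macros expand roughly as follows for num_inst, which is why &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst) above resolves to a plain struct attribute pointer:

    static ssize_t amdgpu_xcp_res_sysfs_num_inst_show(
            struct amdgpu_xcp_res_details *xcp_res, char *buf)
    {
            return sysfs_emit(buf, "%d\n", xcp_res->num_inst);
    }

    struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_num_inst = {
            .attr = { .name = "num_inst", .mode = 0400 },
            .show = amdgpu_xcp_res_sysfs_num_inst_show,
    };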
+
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
+ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);
+
+#define to_xcp_attr(x) \
+ container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
+#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)
+
+static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_res_sysfs_attribute *attribute;
+ struct amdgpu_xcp_res_details *xcp_res;
+
+ attribute = to_xcp_attr(attr);
+ xcp_res = to_xcp_res(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(xcp_res, buf);
+}
+
+static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
+ .show = xcp_cfg_res_sysfs_attr_show,
+};
+
+static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
+ .sysfs_ops = &xcp_cfg_res_sysfs_ops,
+ .default_groups = xcp_cfg_res_sysfs_groups,
+};
+
+static const char *xcp_res_names[] = {
+ [AMDGPU_XCP_RES_XCC] = "xcc",
+ [AMDGPU_XCP_RES_DMA] = "dma",
+ [AMDGPU_XCP_RES_DEC] = "dec",
+ [AMDGPU_XCP_RES_JPEG] = "jpeg",
+};
+
+static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
+ return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);
+
+ return -EOPNOTSUPP;
+}
+
+#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
+static ssize_t supported_xcp_configs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t xcp_config_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ return sysfs_emit(buf, "%s\n",
+ amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
+}
+
+static ssize_t xcp_config_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ int mode, r;
+
+ if (!strncasecmp("SPX", buf, strlen("SPX")))
+ mode = AMDGPU_SPX_PARTITION_MODE;
+ else if (!strncasecmp("DPX", buf, strlen("DPX")))
+ mode = AMDGPU_DPX_PARTITION_MODE;
+ else if (!strncasecmp("TPX", buf, strlen("TPX")))
+ mode = AMDGPU_TPX_PARTITION_MODE;
+ else if (!strncasecmp("QPX", buf, strlen("QPX")))
+ mode = AMDGPU_QPX_PARTITION_MODE;
+ else if (!strncasecmp("CPX", buf, strlen("CPX")))
+ mode = AMDGPU_CPX_PARTITION_MODE;
+ else
+ return -EINVAL;
+
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+
+ if (r)
+ return r;
+
+ xcp_cfg->mode = mode;
+ return size;
+}
+
+static struct kobj_attribute xcp_cfg_sysfs_mode =
+ __ATTR_RW_MODE(xcp_config, 0644);
+
+static void xcp_cfg_sysfs_release(struct kobject *kobj)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ kfree(xcp_cfg);
+}
+
+static const struct kobj_type xcp_cfg_sysfs_ktype = {
+ .release = xcp_cfg_sysfs_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct kobj_attribute supp_part_sysfs_mode =
+ __ATTR_RO(supported_xcp_configs);
+
+static const struct attribute *xcp_attrs[] = {
+ &supp_part_sysfs_mode.attr,
+ &xcp_cfg_sysfs_mode.attr,
+ NULL,
+};
+
+void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i, r, j, rid, mode;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
+ if (!xcp_cfg)
+ return;
+ xcp_cfg->xcp_mgr = adev->xcp_mgr;
+
+ r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
+ &adev->dev->kobj, "compute_partition_config");
+ if (r)
+ goto err1;
+
+ r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
+ if (r)
+ goto err1;
+
+ mode = (xcp_cfg->xcp_mgr->mode ==
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
+ AMDGPU_SPX_PARTITION_MODE :
+ xcp_cfg->xcp_mgr->mode;
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+ if (r)
+ goto err1;
+
+ xcp_cfg->mode = mode;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ rid = xcp_res->id;
+ r = kobject_init_and_add(&xcp_res->kobj,
+ &xcp_cfg_res_sysfs_ktype,
+ &xcp_cfg->kobj, "%s",
+ xcp_res_names[rid]);
+ if (r)
+ goto err;
+ }
+
+ adev->xcp_mgr->xcp_cfg = xcp_cfg;
+ return;
+err:
+ for (j = 0; j < i; j++) {
+ xcp_res = &xcp_cfg->xcp_res[j];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+err1:
+ kobject_put(&xcp_cfg->kobj);
+}
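Note the cleanup discipline in the error paths above: once kobject_init_and_add() has been called, the embedded kobject owns the xcp_cfg allocation, so failure is unwound with kobject_put() rather than kfree(). The final reference drop routes through the ktype's release hook defined earlier:

/* Reference flow on err/err1 (illustrative):
 *   kobject_put(&xcp_cfg->kobj)
 *     -> refcount reaches zero
 *     -> xcp_cfg_sysfs_ktype.release == xcp_cfg_sysfs_release()
 *       -> kfree(xcp_cfg)
 */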
+
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ kobject_put(&xcp_cfg->kobj);
+}
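Taken together, the init path publishes a small hierarchy under the device's sysfs directory: compute_partition_config/{supported_xcp_configs,xcp_config}, plus one subdirectory per resource (xcc, dma, dec, jpeg) holding num_inst and num_shared. Writing a mode name to xcp_config re-queries that mode's per-resource split so the resource nodes describe it; it does not itself repartition the GPU. A minimal userspace sketch; the PCI address below is a placeholder, and the write requires root:

#include <stdio.h>

int main(void)
{
	/* placeholder device path; substitute a real GPU's PCI address */
	const char *base =
		"/sys/bus/pci/devices/0000:03:00.0/compute_partition_config";
	char path[256], line[128];
	FILE *f;

	/* list the modes the ASIC supports, e.g. "SPX, DPX, CPX" */
	snprintf(path, sizeof(path), "%s/supported_xcp_configs", base);
	f = fopen(path, "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("supported: %s", line);
		fclose(f);
	}

	/* select DPX; parsed case-insensitively by xcp_config_store() */
	snprintf(path, sizeof(path), "%s/xcp_config", base);
	f = fopen(path, "w");
	if (f) {
		fputs("DPX\n", f);
		fclose(f);
	}
	return 0;
}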
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 32775260556f..7ac89d78a5bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -56,6 +56,29 @@ enum AMDGPU_XCP_STATE {
AMDGPU_XCP_RESUME,
};
+enum amdgpu_xcp_res_id {
+ AMDGPU_XCP_RES_XCC,
+ AMDGPU_XCP_RES_DMA,
+ AMDGPU_XCP_RES_DEC,
+ AMDGPU_XCP_RES_JPEG,
+ AMDGPU_XCP_RES_MAX,
+};
+
+struct amdgpu_xcp_res_details {
+ enum amdgpu_xcp_res_id id;
+ u8 num_inst;
+ u8 num_shared;
+ struct kobject kobj;
+};
+
+struct amdgpu_xcp_cfg {
+ u8 mode;
+ struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX];
+ u8 num_res;
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+};
+
struct amdgpu_xcp_ip_funcs {
int (*prepare_suspend)(void *handle, uint32_t inst_mask);
int (*suspend)(void *handle, uint32_t inst_mask);
@@ -97,6 +120,9 @@ struct amdgpu_xcp_mgr {
/* Used to determine KFD memory size limits per XCP */
unsigned int num_xcp_per_mem_partition;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ uint32_t supp_xcp_modes;
+ uint32_t avail_xcp_modes;
};
struct amdgpu_xcp_mgr_funcs {
@@ -108,7 +134,9 @@ struct amdgpu_xcp_mgr_funcs {
struct amdgpu_xcp_ip *ip);
int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp *xcp, uint8_t *mem_id);
-
+ int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg);
int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -146,6 +174,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
+void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
+
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
(adev)->xcp_mgr->funcs->select_scheds ? \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 7de449fae1e3..b47422b0b5b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -667,6 +667,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
task_barrier_init(&hive->tb);
hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
hive->hi_req_gpu = NULL;
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
/*
* hive pstate on boot is high in vega20 so we have to go to low
@@ -800,6 +801,23 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
return -EINVAL;
}
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ /* Sharing should always be enabled for non-SRIOV. */
+ if (!amdgpu_sriov_vf(adev))
+ return true;
+
+ for (i = 0; i < top->num_nodes; ++i)
+ if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+ return !!top->nodes[i].is_sharing_enabled;
+
+ return false;
+}
+
/*
* Devices that support extended data require the entire hive to initialize with
* the shared memory buffer flag set.
@@ -860,8 +878,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.supported)
return 0;
- if (!adev->gmc.xgmi.pending_reset &&
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp, false, true);
if (ret) {
dev_err(adev->dev,
@@ -907,8 +924,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
task_barrier_add_task(&hive->tb);
- if (!adev->gmc.xgmi.pending_reset &&
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
if (tmp_adev != adev) {
@@ -985,7 +1001,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- if (!ret && !adev->gmc.xgmi.pending_reset)
+ if (!ret)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
exit_unlock:
@@ -1500,3 +1516,117 @@ int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(work, struct amdgpu_hive_info, reset_on_init_work);
+ struct amdgpu_reset_context reset_context;
+ struct amdgpu_device *tmp_adev;
+ struct list_head device_list;
+ int r;
+
+ mutex_lock(&hive->hive_lock);
+
+ INIT_LIST_HEAD(&device_list);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+
+ tmp_adev = list_first_entry(&device_list, struct amdgpu_device,
+ reset_list);
+ amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+
+ reset_context.method = AMD_RESET_METHOD_ON_INIT;
+ reset_context.reset_req_dev = tmp_adev;
+ reset_context.hive = hive;
+ reset_context.reset_device_list = &device_list;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = amdgpu_ras_init_badpage_info(tmp_adev);
+ if (r && r != -EHWPOISON)
+ dev_err(tmp_adev->dev,
+ "error during bad page data initialization");
+ }
+}
+
+static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
+{
+ INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
+ amdgpu_reset_domain_schedule(hive->reset_domain,
+ &hive->reset_on_init_work);
+}
+
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+ bool reset_scheduled;
+ int num_devs;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ return -EINVAL;
+
+ mutex_lock(&hive->hive_lock);
+ num_devs = atomic_read(&hive->number_devices);
+ reset_scheduled = false;
+ if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
+ amdgpu_xgmi_schedule_reset_on_init(hive);
+ reset_scheduled = true;
+ }
+
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+
+ if (reset_scheduled)
+ flush_work(&hive->reset_on_init_work);
+
+ return 0;
+}
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode)
+{
+ struct amdgpu_device *tmp_adev;
+ int cur_nps_mode, r;
+
+ /* This is expected to be called only during driver unload. The
+ * request needs to be placed only once for all devices in the hive. If
+ * one of them fails, revert the request for the previously successful
+ * devices. After placing the request, set the hive mode to UNKNOWN so
+ * that other devices don't request anymore.
+ */
+ mutex_lock(&hive->hive_lock);
+ if (atomic_read(&hive->requested_nps_mode) ==
+ UNKNOWN_MEMORY_PARTITION_MODE) {
+ dev_dbg(adev->dev, "Unexpected entry for hive NPS change\n");
+ mutex_unlock(&hive->hive_lock);
+ return 0;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, req_nps_mode);
+ if (r)
+ break;
+ }
+ if (r) {
+ /* Request back current mode if one of the requests failed */
+ cur_nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
+ list_for_each_entry_continue_reverse(
+ tmp_adev, &hive->device_list, gmc.xgmi.head)
+ adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, cur_nps_mode);
+ }
+ /* Set to UNKNOWN so that other devices don't request anymore */
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
+ mutex_unlock(&hive->hive_lock);
+
+ return r;
+}
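The revert logic above is the classic apply-to-all/undo-on-failure walk: list_for_each_entry_continue_reverse() resumes from the element before the one that failed and walks back to the head, restoring cur_nps_mode on every device whose request had already succeeded. The same shape in a standalone sketch:

#include <stdio.h>

static int apply(int id)
{
	return id == 2 ? -1 : 0;	/* simulate a failure on the third node */
}

static void undo(int id)
{
	printf("revert node %d\n", id);
}

int main(void)
{
	int ids[] = { 0, 1, 2, 3 };
	int i, n = 4, r = 0;

	for (i = 0; i < n; i++) {
		r = apply(ids[i]);
		if (r)
			break;
	}
	if (r) {
		/* walk back over the nodes already applied, mirroring
		 * list_for_each_entry_continue_reverse() */
		while (--i >= 0)
			undo(ids[i]);
	}
	return r ? 1 : 0;
}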
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index a3bfc16de6d4..8cc7ab38db7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -45,6 +45,8 @@ struct amdgpu_hive_info {
struct amdgpu_reset_domain *reset_domain;
atomic_t ras_recovery;
struct ras_event_manager event_mgr;
+ struct work_struct reset_on_init_work;
+ atomic_t requested_nps_mode;
};
struct amdgpu_pcs_ras_field {
@@ -64,6 +66,8 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
@@ -75,5 +79,10 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 5e8833e4fed2..890976b7ce77 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -447,6 +447,61 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ num_xcp = 1;
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_xcp = 2;
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_xcp = 3;
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xcp = 4;
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xcp_cfg->num_res = ARRAY_SIZE(max_res);
+
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ res_lt_xcp = max_res[i] < num_xcp;
+ xcp_cfg->xcp_res[i].id = i;
+ xcp_cfg->xcp_res[i].num_inst =
+ res_lt_xcp ? 1 : max_res[i] / num_xcp;
+ if (i == AMDGPU_XCP_RES_JPEG)
+ xcp_cfg->xcp_res[i].num_inst *=
+ adev->jpeg.num_jpeg_rings;
+ xcp_cfg->xcp_res[i].num_shared =
+ res_lt_xcp ? num_xcp / max_res[i] : 1;
+ }
+
+ return 0;
+}
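To make the split arithmetic concrete, a worked example under assumed instance counts (8 XCCs, 4 SDMA, 4 VCN, 4 JPEG with 2 rings each):

/* DPX (num_xcp = 2):
 *   XCC:     8 >= 2, so num_inst = 8/2 = 4, num_shared = 1
 *   DMA/DEC: num_inst = 4/2 = 2, num_shared = 1
 *   JPEG:    num_inst = (4/2) * 2 rings = 4, num_shared = 1
 * CPX (num_xcp = 8):
 *   XCC:     num_inst = 8/8 = 1, num_shared = 1
 *   DEC:     4 < 8, so num_inst = 1 and num_shared = 8/4 = 2
 *            (each decoder is shared by two partitions)
 *   JPEG:    num_inst = 1 * 2 rings = 2, num_shared = 2
 */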
+
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
@@ -530,6 +585,57 @@ static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr,
return ret;
}
+static void
+__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ /* this seems to exist only in the emulation phase */
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ int mode;
+
+ xcp_mgr->avail_xcp_modes = 0;
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
+ xcp_mgr->avail_xcp_modes |= BIT(mode);
+ }
+}
+
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
int mode, int *num_xcps)
{
@@ -578,6 +684,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ if (!ret)
+ __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
if (flags & AMDGPU_XCP_OPS_KFD)
amdgpu_amdkfd_unlock_kfd(adev);
@@ -656,9 +764,11 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
.query_partition_mode = &aqua_vanjaram_query_partition_mode,
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+ .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
.select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
+ .update_partition_sched_list =
+ &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -673,6 +783,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
+ __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index cf1d5d462b67..e2cb1f080e88 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1985,9 +1985,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.query_video_codecs = &cik_query_video_codecs,
};
-static int cik_common_early_init(void *handle)
+static int cik_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->smc_rreg = &cik_smc_rreg;
adev->smc_wreg = &cik_smc_wreg;
@@ -2124,19 +2124,9 @@ static int cik_common_early_init(void *handle)
return 0;
}
-static int cik_common_sw_init(void *handle)
+static int cik_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int cik_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int cik_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* move the golden regs per IP block */
cik_init_golden_registers(adev);
@@ -2148,23 +2138,14 @@ static int cik_common_hw_init(void *handle)
return 0;
}
-static int cik_common_hw_fini(void *handle)
+static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int cik_common_suspend(void *handle)
+static int cik_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_common_hw_fini(adev);
-}
-
-static int cik_common_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_common_hw_init(adev);
+ return cik_common_hw_init(ip_block);
}
static bool cik_common_is_idle(void *handle)
@@ -2172,12 +2153,9 @@ static bool cik_common_is_idle(void *handle)
return true;
}
-static int cik_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-static int cik_common_soft_reset(void *handle)
+
+static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX hard reset?? */
return 0;
@@ -2198,20 +2176,13 @@ static int cik_common_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
- .late_init = NULL,
- .sw_init = cik_common_sw_init,
- .sw_fini = cik_common_sw_fini,
.hw_init = cik_common_hw_init,
.hw_fini = cik_common_hw_fini,
- .suspend = cik_common_suspend,
.resume = cik_common_resume,
.is_idle = cik_common_is_idle,
- .wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version cik_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 576baa9dbb0e..1da17755ad53 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -283,9 +283,9 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cik_ih_early_init(void *handle)
+static int cik_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -297,10 +297,10 @@ static int cik_ih_early_init(void *handle)
return 0;
}
-static int cik_ih_sw_init(void *handle)
+static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -311,9 +311,9 @@ static int cik_ih_sw_init(void *handle)
return r;
}
-static int cik_ih_sw_fini(void *handle)
+static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -321,34 +321,28 @@ static int cik_ih_sw_fini(void *handle)
return 0;
}
-static int cik_ih_hw_init(void *handle)
+static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return cik_ih_irq_init(adev);
}
-static int cik_ih_hw_fini(void *handle)
+static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_ih_irq_disable(adev);
+ cik_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cik_ih_suspend(void *handle)
+static int cik_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_fini(adev);
+ return cik_ih_hw_fini(ip_block);
}
-static int cik_ih_resume(void *handle)
+static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_init(adev);
+ return cik_ih_hw_init(ip_block);
}
static bool cik_ih_is_idle(void *handle)
@@ -362,11 +356,11 @@ static bool cik_ih_is_idle(void *handle)
return true;
}
-static int cik_ih_wait_for_idle(void *handle)
+static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -378,9 +372,9 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cik_ih_soft_reset(void *handle)
+static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -423,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
- .late_init = NULL,
.sw_init = cik_ih_sw_init,
.sw_fini = cik_ih_sw_fini,
.hw_init = cik_ih_hw_init,
@@ -435,8 +428,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
.soft_reset = cik_ih_soft_reset,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cik_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 952737de9411..ede1a028d48d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,7 +54,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
-static int cik_sdma_soft_reset(void *handle);
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
@@ -918,9 +918,9 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
}
}
-static int cik_sdma_early_init(void *handle)
+static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
@@ -937,10 +937,10 @@ static int cik_sdma_early_init(void *handle)
return 0;
}
-static int cik_sdma_sw_init(void *handle)
+static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, i;
/* SDMA trap event */
@@ -977,9 +977,9 @@ static int cik_sdma_sw_init(void *handle)
return r;
}
-static int cik_sdma_sw_fini(void *handle)
+static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -989,10 +989,10 @@ static int cik_sdma_sw_fini(void *handle)
return 0;
}
-static int cik_sdma_hw_init(void *handle)
+static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = cik_sdma_start(adev);
if (r)
@@ -1001,9 +1001,9 @@ static int cik_sdma_hw_init(void *handle)
return r;
}
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
@@ -1011,20 +1011,16 @@ static int cik_sdma_hw_fini(void *handle)
return 0;
}
-static int cik_sdma_suspend(void *handle)
+static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_sdma_hw_fini(adev);
+ return cik_sdma_hw_fini(ip_block);
}
-static int cik_sdma_resume(void *handle)
+static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_sdma_soft_reset(handle);
+ cik_sdma_soft_reset(ip_block);
- return cik_sdma_hw_init(adev);
+ return cik_sdma_hw_init(ip_block);
}
static bool cik_sdma_is_idle(void *handle)
@@ -1039,11 +1035,11 @@ static bool cik_sdma_is_idle(void *handle)
return true;
}
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1056,10 +1052,10 @@ static int cik_sdma_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cik_sdma_soft_reset(void *handle)
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
/* sdma0 */
@@ -1217,7 +1213,6 @@ static int cik_sdma_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
- .late_init = NULL,
.sw_init = cik_sdma_sw_init,
.sw_fini = cik_sdma_sw_fini,
.hw_init = cik_sdma_hw_init,
@@ -1229,8 +1224,6 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.soft_reset = cik_sdma_soft_reset,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 072643787384..d72973bd570d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -274,9 +274,9 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cz_ih_early_init(void *handle)
+static int cz_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -288,10 +288,10 @@ static int cz_ih_early_init(void *handle)
return 0;
}
-static int cz_ih_sw_init(void *handle)
+static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -302,9 +302,9 @@ static int cz_ih_sw_init(void *handle)
return r;
}
-static int cz_ih_sw_fini(void *handle)
+static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -312,10 +312,10 @@ static int cz_ih_sw_fini(void *handle)
return 0;
}
-static int cz_ih_hw_init(void *handle)
+static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = cz_ih_irq_init(adev);
if (r)
@@ -324,27 +324,21 @@ static int cz_ih_hw_init(void *handle)
return 0;
}
-static int cz_ih_hw_fini(void *handle)
+static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cz_ih_irq_disable(adev);
+ cz_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cz_ih_suspend(void *handle)
+static int cz_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_fini(adev);
+ return cz_ih_hw_fini(ip_block);
}
-static int cz_ih_resume(void *handle)
+static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_init(adev);
+ return cz_ih_hw_init(ip_block);
}
static bool cz_ih_is_idle(void *handle)
@@ -358,11 +352,11 @@ static bool cz_ih_is_idle(void *handle)
return true;
}
-static int cz_ih_wait_for_idle(void *handle)
+static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -374,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cz_ih_soft_reset(void *handle)
+static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -421,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
- .late_init = NULL,
.sw_init = cz_ih_sw_init,
.sw_fini = cz_ih_sw_fini,
.hw_init = cz_ih_hw_init,
@@ -433,8 +426,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
.soft_reset = cz_ih_soft_reset,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cz_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 70c1399f738d..5098c50d54c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2738,9 +2738,9 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
return 0;
}
-static int dce_v10_0_early_init(void *handle)
+static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
@@ -2765,10 +2765,10 @@ static int dce_v10_0_early_init(void *handle)
return 0;
}
-static int dce_v10_0_sw_init(void *handle)
+static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2844,9 +2844,9 @@ static int dce_v10_0_sw_init(void *handle)
return 0;
}
-static int dce_v10_0_sw_fini(void *handle)
+static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2862,10 +2862,10 @@ static int dce_v10_0_sw_fini(void *handle)
return 0;
}
-static int dce_v10_0_hw_init(void *handle)
+static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_init_golden_registers(adev);
@@ -2887,10 +2887,10 @@ static int dce_v10_0_hw_init(void *handle)
return 0;
}
-static int dce_v10_0_hw_fini(void *handle)
+static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_hpd_fini(adev);
@@ -2905,9 +2905,9 @@ static int dce_v10_0_hw_fini(void *handle)
return 0;
}
-static int dce_v10_0_suspend(void *handle)
+static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2917,18 +2917,18 @@ static int dce_v10_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v10_0_hw_fini(handle);
+ return dce_v10_0_hw_fini(ip_block);
}
-static int dce_v10_0_resume(void *handle)
+static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v10_0_hw_init(handle);
+ ret = dce_v10_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2948,22 +2948,17 @@ static bool dce_v10_0_is_idle(void *handle)
return true;
}
-static int dce_v10_0_wait_for_idle(void *handle)
+static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static bool dce_v10_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return dce_v10_0_is_display_hung(adev);
}
-static int dce_v10_0_soft_reset(void *handle)
+static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3322,7 +3317,6 @@ static int dce_v10_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
- .late_init = NULL,
.sw_init = dce_v10_0_sw_init,
.sw_fini = dce_v10_0_sw_fini,
.hw_init = dce_v10_0_hw_init,
@@ -3330,13 +3324,10 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.suspend = dce_v10_0_suspend,
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
- .wait_for_idle = dce_v10_0_wait_for_idle,
.check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f154c24499c8..c5680ff4ab9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2851,9 +2851,9 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
return 0;
}
-static int dce_v11_0_early_init(void *handle)
+static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
@@ -2891,10 +2891,10 @@ static int dce_v11_0_early_init(void *handle)
return 0;
}
-static int dce_v11_0_sw_init(void *handle)
+static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2971,9 +2971,9 @@ static int dce_v11_0_sw_init(void *handle)
return 0;
}
-static int dce_v11_0_sw_fini(void *handle)
+static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2989,10 +2989,10 @@ static int dce_v11_0_sw_fini(void *handle)
return 0;
}
-static int dce_v11_0_hw_init(void *handle)
+static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v11_0_init_golden_registers(adev);
@@ -3025,10 +3025,10 @@ static int dce_v11_0_hw_init(void *handle)
return 0;
}
-static int dce_v11_0_hw_fini(void *handle)
+static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v11_0_hpd_fini(adev);
@@ -3043,9 +3043,9 @@ static int dce_v11_0_hw_fini(void *handle)
return 0;
}
-static int dce_v11_0_suspend(void *handle)
+static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -3055,18 +3055,18 @@ static int dce_v11_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v11_0_hw_fini(handle);
+ return dce_v11_0_hw_fini(ip_block);
}
-static int dce_v11_0_resume(void *handle)
+static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v11_0_hw_init(handle);
+ ret = dce_v11_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -3086,15 +3086,10 @@ static bool dce_v11_0_is_idle(void *handle)
return true;
}
-static int dce_v11_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v11_0_soft_reset(void *handle)
+static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v11_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3454,7 +3449,6 @@ static int dce_v11_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
- .late_init = NULL,
.sw_init = dce_v11_0_sw_init,
.sw_fini = dce_v11_0_sw_fini,
.hw_init = dce_v11_0_hw_init,
@@ -3462,12 +3456,9 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.suspend = dce_v11_0_suspend,
.resume = dce_v11_0_resume,
.is_idle = dce_v11_0_is_idle,
- .wait_for_idle = dce_v11_0_wait_for_idle,
.soft_reset = dce_v11_0_soft_reset,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a7fcb135827f..eb7de9122d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2633,9 +2633,9 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
return 0;
}
-static int dce_v6_0_early_init(void *handle)
+static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
@@ -2664,11 +2664,11 @@ static int dce_v6_0_early_init(void *handle)
return 0;
}
-static int dce_v6_0_sw_init(void *handle)
+static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
bool ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2743,9 +2743,9 @@ static int dce_v6_0_sw_init(void *handle)
return r;
}
-static int dce_v6_0_sw_fini(void *handle)
+static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2760,10 +2760,10 @@ static int dce_v6_0_sw_fini(void *handle)
return 0;
}
-static int dce_v6_0_hw_init(void *handle)
+static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* disable vga render */
dce_v6_0_set_vga_render_state(adev, false);
@@ -2783,10 +2783,10 @@ static int dce_v6_0_hw_init(void *handle)
return 0;
}
-static int dce_v6_0_hw_fini(void *handle)
+static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v6_0_hpd_fini(adev);
@@ -2801,9 +2801,9 @@ static int dce_v6_0_hw_fini(void *handle)
return 0;
}
-static int dce_v6_0_suspend(void *handle)
+static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2812,18 +2812,18 @@ static int dce_v6_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v6_0_hw_fini(handle);
+ return dce_v6_0_hw_fini(ip_block);
}
-static int dce_v6_0_resume(void *handle)
+static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v6_0_hw_init(handle);
+ ret = dce_v6_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2843,12 +2843,7 @@ static bool dce_v6_0_is_idle(void *handle)
return true;
}
-static int dce_v6_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v6_0_soft_reset(void *handle)
+static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
return 0;
@@ -3144,7 +3139,6 @@ static int dce_v6_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
- .late_init = NULL,
.sw_init = dce_v6_0_sw_init,
.sw_fini = dce_v6_0_sw_fini,
.hw_init = dce_v6_0_hw_init,
@@ -3152,12 +3146,9 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.suspend = dce_v6_0_suspend,
.resume = dce_v6_0_resume,
.is_idle = dce_v6_0_is_idle,
- .wait_for_idle = dce_v6_0_wait_for_idle,
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 77ac3f114d24..04b79ff87f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2644,9 +2644,9 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
return 0;
}
-static int dce_v8_0_early_init(void *handle)
+static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
@@ -2680,10 +2680,10 @@ static int dce_v8_0_early_init(void *handle)
return 0;
}
-static int dce_v8_0_sw_init(void *handle)
+static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2764,9 +2764,9 @@ static int dce_v8_0_sw_init(void *handle)
return 0;
}
-static int dce_v8_0_sw_fini(void *handle)
+static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2782,10 +2782,10 @@ static int dce_v8_0_sw_fini(void *handle)
return 0;
}
-static int dce_v8_0_hw_init(void *handle)
+static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* disable vga render */
dce_v8_0_set_vga_render_state(adev, false);
@@ -2805,10 +2805,10 @@ static int dce_v8_0_hw_init(void *handle)
return 0;
}
-static int dce_v8_0_hw_fini(void *handle)
+static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v8_0_hpd_fini(adev);
@@ -2823,9 +2823,9 @@ static int dce_v8_0_hw_fini(void *handle)
return 0;
}
-static int dce_v8_0_suspend(void *handle)
+static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2835,18 +2835,18 @@ static int dce_v8_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v8_0_hw_fini(handle);
+ return dce_v8_0_hw_fini(ip_block);
}
-static int dce_v8_0_resume(void *handle)
+static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v8_0_hw_init(handle);
+ ret = dce_v8_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2866,15 +2866,10 @@ static bool dce_v8_0_is_idle(void *handle)
return true;
}
-static int dce_v8_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v8_0_soft_reset(void *handle)
+static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v8_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3232,7 +3227,6 @@ static int dce_v8_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
- .late_init = NULL,
.sw_init = dce_v8_0_sw_init,
.sw_fini = dce_v8_0_sw_fini,
.hw_init = dce_v8_0_hw_init,
@@ -3240,12 +3234,9 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.suspend = dce_v8_0_suspend,
.resume = dce_v8_0_resume,
.is_idle = dce_v8_0_is_idle,
- .wait_for_idle = dce_v8_0_wait_for_idle,
.soft_reset = dce_v8_0_soft_reset,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45ed97038df0..9da95b25e158 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3677,13 +3677,19 @@ static int gfx_v10_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
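The net effect of this hunk is to repurpose the two dwords that previously carried the unused GWS mask, so the KIQ SET_RESOURCES packet now hands firmware the 256-byte-aligned cleaner shader address (gpu_addr >> 8). Dword layout as emitted above:

/* SET_RESOURCES, one dword per slot:
 *  [0] PACKET3(PACKET3_SET_RESOURCES, 6)
 *  [1] vmid_mask:0 | queue_type:0 (KIQ)
 *  [2] queue_mask lo
 *  [3] queue_mask hi
 *  [4] cleaner shader addr lo   (was: gws mask lo)
 *  [5] cleaner shader addr hi   (was: gws mask hi)
 *  [6] oac mask
 *  [7] gds heap base:0, gds heap size:0
 */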
@@ -4683,11 +4689,11 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v10_0_sw_init(void *handle)
+static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
@@ -4726,6 +4732,11 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
/* KIQ event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
@@ -4842,6 +4853,9 @@ static int gfx_v10_0_sw_init(void *handle)
gfx_v10_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -4866,10 +4880,10 @@ static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_ptr);
}
-static int gfx_v10_0_sw_fini(void *handle)
+static int gfx_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4881,6 +4895,8 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v10_0_pfp_fini(adev);
gfx_v10_0_ce_fini(adev);
gfx_v10_0_me_fini(adev);
@@ -4891,6 +4907,7 @@ static int gfx_v10_0_sw_fini(void *handle)
gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);
gfx_v10_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -6374,7 +6391,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -6412,7 +6429,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
+ /* Set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -7366,14 +7383,17 @@ static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data);
}
-static int gfx_v10_0_hw_init(void *handle)
+static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_emu_mode)
gfx_v10_0_init_golden_registers(adev);
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/**
* For gfx 10, rlc firmware loading relies on smu firmware being
@@ -7418,9 +7438,9 @@ static int gfx_v10_0_hw_init(void *handle)
return r;
}
-static int gfx_v10_0_hw_fini(void *handle)
+static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7431,7 +7451,7 @@ static int gfx_v10_0_hw_fini(void *handle)
* otherwise disallowing gfxoff will fail to take effect.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
- gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+ gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -7456,14 +7476,14 @@ static int gfx_v10_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v10_0_suspend(void *handle)
+static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v10_0_hw_fini(handle);
+ return gfx_v10_0_hw_fini(ip_block);
}
-static int gfx_v10_0_resume(void *handle)
+static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v10_0_hw_init(handle);
+ return gfx_v10_0_hw_init(ip_block);
}
static bool gfx_v10_0_is_idle(void *handle)
@@ -7477,11 +7497,11 @@ static bool gfx_v10_0_is_idle(void *handle)
return true;
}
-static int gfx_v10_0_wait_for_idle(void *handle)
+static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -7495,11 +7515,11 @@ static int gfx_v10_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v10_0_soft_reset(void *handle)
+static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
@@ -7678,9 +7698,9 @@ static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v10_0_early_init(void *handle)
+static int gfx_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v10_0_gfx_funcs;
@@ -7722,9 +7742,9 @@ static int gfx_v10_0_early_init(void *handle)
return gfx_v10_0_init_microcode(adev);
}
-static int gfx_v10_0_late_init(void *handle)
+static int gfx_v10_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -9402,8 +9422,6 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -9414,8 +9432,7 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
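A worked example of the batched-NOP layout above (illustrative, not part of the patch). PKT3 count fields hold "payload dwords minus one" -- compare the SET_RESOURCES packet later in this series, whose count of 6 is followed by seven dword writes -- which is why the header encodes num_nop - 2 while the filler pass writes num_nop - 1 dwords:

static void nop_run_layout_example(struct amdgpu_ring *ring)
{
	uint32_t num_nop = 8;	/* total padding dwords requested */

	/* 8 dwords = 1 header + 7 payload, so the count field is 6,
	 * clamped to stay within the 14-bit PKT3 count */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* the remaining 7 dwords, filled with ring->funcs->nop */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}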
static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
@@ -9568,9 +9585,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
@@ -9632,9 +9649,9 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v10_ip_dump(void *handle)
+static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
@@ -9699,6 +9716,13 @@ static void gfx_v10_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9749,7 +9773,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 8, /* gfx_v10_0_emit_mem_sync */
+ 8 + /* gfx_v10_0_emit_mem_sync */
+ 2, /* gfx_v10_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -9772,6 +9797,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9791,7 +9819,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v10_0_emit_mem_sync */
+ 8 + /* gfx_v10_0_emit_mem_sync */
+ 2, /* gfx_v10_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -9809,6 +9838,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
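On the emit_frame_size arithmetic in the two ring-funcs tables above: RUN_CLEANER_SHADER is a fixed two-dword packet, so every frame that may run it reserves exactly two extra ring dwords; the begin_use/end_use hooks meanwhile bracket each submission so the enforce-isolation logic can track ring usage. A trivial sketch of the accounting (not from the patch):

/* Matches gfx_v10_0_ring_emit_cleaner_shader(): header plus reserved
 * dword, hence the "+ 2" added to emit_frame_size. */
static unsigned int run_cleaner_shader_dwords(void)
{
	return 1	/* PACKET3(PACKET3_RUN_CLEANER_SHADER, 0) */
	     + 1;	/* RESERVED field, programmed to zero */
}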
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d3e8be82a172..894fc04201c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -293,14 +293,20 @@ static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
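The two dwords that were previously zero-filled as a GWS mask now carry the cleaner shader address. A worked example with a made-up address, assuming the usual convention that CP consumes such addresses in 256-byte units (hence the >> 8):

static void cleaner_shader_addr_packing_example(void)
{
	u64 gpu_addr = 0x0000008012345600ULL;	/* hypothetical BO address */
	u64 mc = gpu_addr >> 8;			/* byte address -> 256-byte units */

	pr_debug("cleaner shader addr lo=0x%08x hi=0x%08x\n",
		 lower_32_bits(mc), upper_32_bits(mc));
}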
@@ -483,8 +489,6 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -495,8 +499,7 @@ static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
@@ -1536,11 +1539,11 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v11_0_sw_init(void *handle)
+static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
@@ -1575,6 +1578,11 @@ static int gfx_v11_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ }
+
/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
amdgpu_sriov_is_pp_one_vf(adev))
@@ -1700,6 +1708,10 @@ static int gfx_v11_0_sw_init(void *handle)
gfx_v11_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1732,10 +1744,10 @@ static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
-static int gfx_v11_0_sw_fini(void *handle)
+static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -1749,6 +1761,8 @@ static int gfx_v11_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev, 0);
}
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v11_0_pfp_fini(adev);
gfx_v11_0_me_fini(adev);
gfx_v11_0_rlc_fini(adev);
@@ -1759,6 +1773,8 @@ static int gfx_v11_0_sw_fini(void *handle)
gfx_v11_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
kfree(adev->gfx.ip_dump_gfx_queues);
@@ -1893,8 +1909,10 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ /*
+ * Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs.
+ */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
@@ -3555,7 +3573,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -3593,7 +3611,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
+ /* Set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -4568,10 +4586,13 @@ static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}
-static int gfx_v11_0_hw_init(void *handle)
+static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
if (adev->gfx.imu.funcs) {
@@ -4665,9 +4686,9 @@ static int gfx_v11_0_hw_init(void *handle)
return r;
}
-static int gfx_v11_0_hw_fini(void *handle)
+static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4703,14 +4724,14 @@ static int gfx_v11_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v11_0_suspend(void *handle)
+static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v11_0_hw_fini(handle);
+ return gfx_v11_0_hw_fini(ip_block);
}
-static int gfx_v11_0_resume(void *handle)
+static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v11_0_hw_init(handle);
+ return gfx_v11_0_hw_init(ip_block);
}
static bool gfx_v11_0_is_idle(void *handle)
@@ -4724,11 +4745,11 @@ static bool gfx_v11_0_is_idle(void *handle)
return true;
}
-static int gfx_v11_0_wait_for_idle(void *handle)
+static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -4774,12 +4795,12 @@ int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v11_0_soft_reset(void *handle)
+static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
int r, i, j, k;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
@@ -4905,10 +4926,10 @@ static int gfx_v11_0_soft_reset(void *handle)
return gfx_v11_0_cp_resume(adev);
}
-static bool gfx_v11_0_check_soft_reset(void *handle)
+static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
long tmo = msecs_to_jiffies(1000);
@@ -4929,12 +4950,13 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false;
}
-static int gfx_v11_0_post_soft_reset(void *handle)
+static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
/**
	 * GFX soft reset will impact MES; MES needs to be resumed after a GFX soft reset
*/
- return amdgpu_mes_resume((struct amdgpu_device *)handle);
+ return amdgpu_mes_resume(adev);
}
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
@@ -4995,9 +5017,9 @@ static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v11_0_early_init(void *handle)
+static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
@@ -5018,9 +5040,9 @@ static int gfx_v11_0_early_init(void *handle)
return gfx_v11_0_init_microcode(adev);
}
-static int gfx_v11_0_late_init(void *handle)
+static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -6639,9 +6661,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
@@ -6703,9 +6725,9 @@ static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v11_ip_dump(void *handle)
+static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
@@ -6769,6 +6791,13 @@ static void gfx_v11_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.name = "gfx_v11_0",
.early_init = gfx_v11_0_early_init,
@@ -6818,7 +6847,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
22 + /* SET_Q_PREEMPTION_MODE */
8 + 8 + /* FENCE x2 */
- 8, /* gfx_v11_0_emit_mem_sync */
+ 8 + /* gfx_v11_0_emit_mem_sync */
+ 2, /* gfx_v11_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
.emit_fence = gfx_v11_0_ring_emit_fence,
@@ -6841,6 +6871,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6861,7 +6894,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v11_0_emit_mem_sync */
+ 8 + /* gfx_v11_0_emit_mem_sync */
+ 2, /* gfx_v11_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
.emit_fence = gfx_v11_0_ring_emit_fence,
@@ -6879,6 +6913,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 47b47d21f464..9fec28d8a5fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1319,12 +1319,12 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v12_0_sw_init(void *handle)
+static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
unsigned num_compute_rings;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
@@ -1346,6 +1346,12 @@ static int gfx_v12_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
/* recalculate compute rings to use based on hardware configuration */
num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe) / 2;
@@ -1460,6 +1466,10 @@ static int gfx_v12_0_sw_init(void *handle)
gfx_v12_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1492,10 +1502,10 @@ static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
-static int gfx_v12_0_sw_fini(void *handle)
+static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -1519,6 +1529,8 @@ static int gfx_v12_0_sw_fini(void *handle)
gfx_v12_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
kfree(adev->gfx.ip_dump_gfx_queues);
@@ -2601,7 +2613,7 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -3513,10 +3525,10 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static int gfx_v12_0_hw_init(void *handle)
+static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
@@ -3603,9 +3615,9 @@ static int gfx_v12_0_hw_init(void *handle)
return r;
}
-static int gfx_v12_0_hw_fini(void *handle)
+static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3643,14 +3655,14 @@ static int gfx_v12_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v12_0_suspend(void *handle)
+static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v12_0_hw_fini(handle);
+ return gfx_v12_0_hw_fini(ip_block);
}
-static int gfx_v12_0_resume(void *handle)
+static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v12_0_hw_init(handle);
+ return gfx_v12_0_hw_init(ip_block);
}
static bool gfx_v12_0_is_idle(void *handle)
@@ -3664,11 +3676,11 @@ static bool gfx_v12_0_is_idle(void *handle)
return true;
}
-static int gfx_v12_0_wait_for_idle(void *handle)
+static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -3695,9 +3707,9 @@ static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev)
return clock;
}
-static int gfx_v12_0_early_init(void *handle)
+static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
@@ -3717,9 +3729,9 @@ static int gfx_v12_0_early_init(void *handle)
return gfx_v12_0_init_microcode(adev);
}
-static int gfx_v12_0_late_init(void *handle)
+static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -5022,8 +5034,6 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -5034,13 +5044,19 @@ static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
+static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
@@ -5102,9 +5118,9 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v12_ip_dump(void *handle)
+static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
@@ -5297,7 +5313,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 8, /* gfx_v12_0_emit_mem_sync */
+ 8 + /* gfx_v12_0_emit_mem_sync */
+ 2, /* gfx_v12_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */
.emit_ib = gfx_v12_0_ring_emit_ib_gfx,
.emit_fence = gfx_v12_0_ring_emit_fence,
@@ -5318,6 +5335,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5336,7 +5356,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v12_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v12_0_emit_mem_sync */
+ 8 + /* gfx_v12_0_emit_mem_sync */
+ 2, /* gfx_v12_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */
.emit_ib = gfx_v12_0_ring_emit_ib_compute,
.emit_fence = gfx_v12_0_ring_emit_fence,
@@ -5353,6 +5374,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
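As on gfx11, every GC 12.x variant currently falls through to the disabled default in sw_init. Mirroring the 9.4.2 hunk later in this patch, a part would opt in by adding a case ahead of the default; the sketch below is hypothetical -- no 12.x cleaner shader exists in this series:

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 1):	/* hypothetical opt-in, not in this patch */
		adev->gfx.cleaner_shader_ptr = gfx_12_0_1_cleaner_shader_hex;	/* would need a real shader */
		adev->gfx.cleaner_shader_size = sizeof(gfx_12_0_1_cleaner_shader_hex);
		adev->gfx.enable_cleaner_shader = true;
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}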
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 564f0b9336b6..41f50bf380c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3023,9 +3023,9 @@ static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
.start = gfx_v6_0_rlc_start
};
-static int gfx_v6_0_early_init(void *handle)
+static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
@@ -3039,10 +3039,10 @@ static int gfx_v6_0_early_init(void *handle)
return 0;
}
-static int gfx_v6_0_sw_init(void *handle)
+static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
@@ -3107,10 +3107,10 @@ static int gfx_v6_0_sw_init(void *handle)
return r;
}
-static int gfx_v6_0_sw_fini(void *handle)
+static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -3122,10 +3122,10 @@ static int gfx_v6_0_sw_fini(void *handle)
return 0;
}
-static int gfx_v6_0_hw_init(void *handle)
+static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v6_0_constants_init(adev);
@@ -3142,9 +3142,9 @@ static int gfx_v6_0_hw_init(void *handle)
return r;
}
-static int gfx_v6_0_hw_fini(void *handle)
+static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v6_0_cp_enable(adev, false);
adev->gfx.rlc.funcs->stop(adev);
@@ -3153,18 +3153,14 @@ static int gfx_v6_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v6_0_suspend(void *handle)
+static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v6_0_hw_fini(adev);
+ return gfx_v6_0_hw_fini(ip_block);
}
-static int gfx_v6_0_resume(void *handle)
+static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v6_0_hw_init(adev);
+ return gfx_v6_0_hw_init(ip_block);
}
static bool gfx_v6_0_is_idle(void *handle)
@@ -3177,24 +3173,19 @@ static bool gfx_v6_0_is_idle(void *handle)
return true;
}
-static int gfx_v6_0_wait_for_idle(void *handle)
+static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v6_0_is_idle(handle))
+ if (gfx_v6_0_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
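One subtlety in the loop above: gfx_v6_0_is_idle() keeps the legacy void *handle signature (its definition is untouched by this series), and that handle has always been the amdgpu_device pointer, so passing adev directly is behavior-preserving. The same substitution recurs in the gfx8 and gfx9 wait loops later in the patch. Equivalence sketch, not new driver code:

static int wait_for_idle_shape(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* is_idle(handle) and is_idle(adev) receive the same pointer */
	return gfx_v6_0_is_idle(adev) ? 0 : -ETIMEDOUT;
}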
-static int gfx_v6_0_soft_reset(void *handle)
-{
- return 0;
-}
-
static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -3444,7 +3435,6 @@ static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
- .late_init = NULL,
.sw_init = gfx_v6_0_sw_init,
.sw_fini = gfx_v6_0_sw_fini,
.hw_init = gfx_v6_0_hw_init,
@@ -3453,11 +3443,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.resume = gfx_v6_0_resume,
.is_idle = gfx_v6_0_is_idle,
.wait_for_idle = gfx_v6_0_wait_for_idle,
- .soft_reset = gfx_v6_0_soft_reset,
.set_clockgating_state = gfx_v6_0_set_clockgating_state,
.set_powergating_state = gfx_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
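Dropping the explicit .late_init = NULL / .dump_ip_state = NULL / .print_ip_state = NULL lines (and the soft_reset stub that just returned 0) leans on standard C aggregate initialization -- members not named in a designated initializer are zero-initialized -- plus amdgpu's existing convention that a NULL optional callback is simply skipped. A minimal standalone illustration:

struct funcs_example {
	int (*late_init)(void);
	int (*soft_reset)(void);
};

/* Unnamed members are zero-initialized, identical to spelling out
 * ".late_init = NULL". */
static const struct funcs_example f = { };

static int call_if_present(void)
{
	return f.late_init ? f.late_init() : 0;	/* NULL check behaves as before */
}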
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index f146806c4633..824d5913103b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2559,7 +2559,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2876,7 +2876,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
wb_gpu_addr = ring->rptr_gpu_addr;
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -4134,9 +4134,9 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.update_spm_vmid = gfx_v7_0_update_spm_vmid
};
-static int gfx_v7_0_early_init(void *handle)
+static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
@@ -4151,9 +4151,9 @@ static int gfx_v7_0_early_init(void *handle)
return 0;
}
-static int gfx_v7_0_late_init(void *handle)
+static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -4343,10 +4343,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
return 0;
}
-static int gfx_v7_0_sw_init(void *handle)
+static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j, k, r, ring_id;
switch (adev->asic_type) {
@@ -4439,9 +4439,9 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
-static int gfx_v7_0_sw_fini(void *handle)
+static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
@@ -4465,10 +4465,10 @@ static int gfx_v7_0_sw_fini(void *handle)
return 0;
}
-static int gfx_v7_0_hw_init(void *handle)
+static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v7_0_constants_init(adev);
@@ -4486,9 +4486,9 @@ static int gfx_v7_0_hw_init(void *handle)
return r;
}
-static int gfx_v7_0_hw_fini(void *handle)
+static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4499,18 +4499,14 @@ static int gfx_v7_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v7_0_suspend(void *handle)
+static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v7_0_hw_fini(adev);
+ return gfx_v7_0_hw_fini(ip_block);
}
-static int gfx_v7_0_resume(void *handle)
+static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v7_0_hw_init(adev);
+ return gfx_v7_0_hw_init(ip_block);
}
static bool gfx_v7_0_is_idle(void *handle)
@@ -4523,11 +4519,11 @@ static bool gfx_v7_0_is_idle(void *handle)
return true;
}
-static int gfx_v7_0_wait_for_idle(void *handle)
+static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -4540,11 +4536,11 @@ static int gfx_v7_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v7_0_soft_reset(void *handle)
+static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
@@ -5009,8 +5005,6 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.soft_reset = gfx_v7_0_soft_reset,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bc8295812cc8..480c41ee947e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1894,12 +1894,12 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
-static int gfx_v8_0_sw_init(void *handle)
+static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (adev->asic_type) {
case CHIP_TONGA:
@@ -2037,9 +2037,9 @@ static int gfx_v8_0_sw_init(void *handle)
return 0;
}
-static int gfx_v8_0_sw_fini(void *handle)
+static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
@@ -4260,7 +4260,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -4783,10 +4783,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
gfx_v8_0_cp_compute_enable(adev, enable);
}
-static int gfx_v8_0_hw_init(void *handle)
+static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
@@ -4865,13 +4865,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_wait_for_idle(void *handle)
+static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(handle))
+ if (gfx_v8_0_is_idle(adev))
return 0;
udelay(1);
@@ -4879,9 +4879,9 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4897,8 +4897,9 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
+
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- if (!gfx_v8_0_wait_for_idle(adev))
+ if (!gfx_v8_0_wait_for_idle(ip_block))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
@@ -4911,19 +4912,19 @@ static int gfx_v8_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v8_0_suspend(void *handle)
+static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v8_0_hw_fini(handle);
+ return gfx_v8_0_hw_fini(ip_block);
}
-static int gfx_v8_0_resume(void *handle)
+static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v8_0_hw_init(handle);
+ return gfx_v8_0_hw_init(ip_block);
}
-static bool gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
@@ -4983,9 +4984,9 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
}
}
-static int gfx_v8_0_pre_soft_reset(void *handle)
+static int gfx_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
@@ -5024,9 +5025,9 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
return 0;
}
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
@@ -5086,9 +5087,9 @@ static int gfx_v8_0_soft_reset(void *handle)
return 0;
}
-static int gfx_v8_0_post_soft_reset(void *handle)
+static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
@@ -5254,9 +5255,9 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
-static int gfx_v8_0_early_init(void *handle)
+static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
@@ -5271,9 +5272,9 @@ static int gfx_v8_0_early_init(void *handle)
return 0;
}
-static int gfx_v8_0_late_init(void *handle)
+static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -6947,8 +6948,6 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
.get_clockgating_state = gfx_v8_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 23f0573ae47b..66947850d7e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2198,12 +2198,12 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v9_0_sw_init(void *handle)
+static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned int hw_prio;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.mec_fw_version >= 88) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
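The 9.4.2 enablement above degrades gracefully: if the shader buffer cannot be set up, isolation is left off rather than failing sw_init. A hedged summary of the contract this leaves behind (the helper itself is hypothetical; the fields are from this patch):

static bool cleaner_shader_usable(struct amdgpu_device *adev)
{
	/* true only on parts that opted in (MEC fw 88+ here) and whose
	 * shader buffer initialized successfully */
	return adev->gfx.enable_cleaner_shader &&
	       adev->gfx.cleaner_shader_ptr != NULL;
}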
@@ -2398,10 +2410,10 @@ static int gfx_v9_0_sw_init(void *handle)
}
-static int gfx_v9_0_sw_fini(void *handle)
+static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
@@ -2418,6 +2430,8 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v9_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -3184,6 +3198,15 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
@@ -3346,7 +3369,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
@@ -3393,7 +3416,15 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
- (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
@@ -3914,6 +3945,10 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return r;
}
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_cp_gfx_enable(adev, false);
+ gfx_v9_0_cp_compute_enable(adev, false);
+
r = gfx_v9_0_kiq_resume(adev);
if (r)
return r;
@@ -3970,10 +4005,10 @@ static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
gfx_v9_0_cp_compute_enable(adev, enable);
}
-static int gfx_v9_0_hw_init(void *handle)
+static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
adev->gfx.cleaner_shader_ptr);
@@ -3999,9 +4034,9 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
-static int gfx_v9_0_hw_fini(void *handle)
+static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
@@ -4051,14 +4086,14 @@ static int gfx_v9_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v9_0_suspend(void *handle)
+static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_0_hw_fini(handle);
+ return gfx_v9_0_hw_fini(ip_block);
}
-static int gfx_v9_0_resume(void *handle)
+static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_0_hw_init(handle);
+ return gfx_v9_0_hw_init(ip_block);
}
static bool gfx_v9_0_is_idle(void *handle)
@@ -4072,24 +4107,24 @@ static bool gfx_v9_0_is_idle(void *handle)
return true;
}
-static int gfx_v9_0_wait_for_idle(void *handle)
+static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_0_is_idle(handle))
+ if (gfx_v9_0_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v9_0_soft_reset(void *handle)
+static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
@@ -4745,9 +4780,9 @@ fail:
return r;
}
-static int gfx_v9_0_early_init(void *handle)
+static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
@@ -4771,9 +4806,9 @@ static int gfx_v9_0_early_init(void *handle)
return gfx_v9_0_init_microcode(adev);
}
-static int gfx_v9_0_ecc_late_init(void *handle)
+static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/*
@@ -4805,9 +4840,9 @@ static int gfx_v9_0_ecc_late_init(void *handle)
return 0;
}
-static int gfx_v9_0_late_init(void *handle)
+static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -4822,7 +4857,7 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ecc_late_init(handle);
+ r = gfx_v9_0_ecc_late_init(ip_block);
if (r)
return r;
@@ -7167,8 +7202,6 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -7179,8 +7212,7 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
@@ -7237,10 +7269,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (!adev->debug_exp_resets &&
- !adev->gfx.num_gfx_rings)
- return -EINVAL;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -7316,9 +7344,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
@@ -7356,9 +7384,9 @@ static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
}
-static void gfx_v9_ip_dump(void *handle)
+static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
index 36c0292b5110..0b6bd09b7529 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,3 +24,45 @@
static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
/* Add the cleaner shader code here */
};
+
+/* Define the cleaner shader gfx_9_4_2 */
+static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
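The dword array above is the assembled form of the gfx_v9_4_2_cleaner_shader.asm file added next; at hw_init time amdgpu_gfx_cleaner_shader_init() copies it into the reserved buffer so the CP can fetch the shader from the address handed over via SET_RESOURCES. A sketch of what that copy amounts to -- the CPU-mapping field name is an assumption, not taken from this excerpt:

static void cleaner_shader_upload_sketch(struct amdgpu_device *adev)
{
	memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr,	/* assumed field name */
		    adev->gfx.cleaner_shader_ptr,	/* gfx_9_4_2_cleaner_shader_hex */
		    adev->gfx.cleaner_shader_size);
}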
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
new file mode 100644
index 000000000000..35b8cf9070bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192-Dword cleaner shader.
+// To turn this shader program on for compilation, change this shader to main and the lower shader main to main_1.
+
+// MI200 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup can end. Without this, it is possible that not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches one workgroup of 24 waves, yielding 6 waves per SIMD in each CU
+// Each wave allocates 96 SGPRs
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched, CP sends an unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
+
+shader main
+ asic(MI200)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+	s_cmp_eq_u32 s0, 1                                 // If bit0 of sgpr0 is set (FW sets COMPUTE_USER_DATA_3), clear VGPRs and LDS
+	s_cbranch_scc0 label_0023                          // Clean VGPRs and LDS if sgpr0 of the wave is set, scc = (s0 == 1)
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+	s_and_b32 s2, s2, s1				// sgpr1 has the tg_size (first_wave) term since in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+	s_cbranch_scc0 label_clean_sgpr_1		// Clean LDS only if this is the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+	v_mul_u32_u24 v1, 0x00000008, v1		// * 8, so each thread is a double-dword address (8 bytes)
+ s_mov_b32 s2, 0x00000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+	// Each FirstWave of a WorkGroup clears its 64KB block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+	s_mov_b64 vcc, 0					//clear vcc
+
+s_endpgm
+end
+
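The VGPR-clear loop above leans on s_set_gpr_idx_on: the eight unrolled v_mov_b32 writes land at v[idx]..v[idx+7], and walking the index from 0x78 down to 0 in steps of 8 touches all 128 VGPRs a wave64 owns. A minimal user-space model of that indexing (a sketch only; the array and the assert harness are illustrative, the real work is done by the GCN shader):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t vgpr[128];
	int idx = 0x78;                      /* s_mov_b32 s2, 0x00000078 */

	for (int i = 0; i < 128; i++)        /* pretend-dirty register file */
		vgpr[i] = 0xdeadbeef;

	for (;;) {
		for (int k = 0; k < 8; k++)  /* the eight unrolled v_mov_b32 vN, 0 */
			vgpr[idx + k] = 0;
		if (idx == 0)                /* 0 - 8 would borrow: scc=1, fall through */
			break;
		idx -= 8;                    /* s_sub_u32 s2, s2, 8; s_set_gpr_idx_idx s2 */
	}

	for (int i = 0; i < 128; i++)
		assert(vgpr[i] == 0);        /* all 128 wave VGPRs cleared */
	return 0;
}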
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c100845409f7..016290f00592 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1049,10 +1049,10 @@ static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v9_4_3_sw_init(void *handle)
+static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
@@ -1165,12 +1165,9 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
-
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_gfx_sysfs_init(adev);
- if (r)
- return r;
- }
+ r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
gfx_v9_4_3_alloc_ip_dump(adev);
@@ -1181,10 +1178,10 @@ static int gfx_v9_4_3_sw_init(void *handle)
return 0;
}
-static int gfx_v9_4_3_sw_fini(void *handle)
+static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, num_xcc;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
@@ -1201,8 +1198,7 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_gfx_sysfs_fini(adev);
+ amdgpu_gfx_sysfs_fini(adev);
amdgpu_gfx_sysfs_isolation_shader_fini(adev);
kfree(adev->gfx.ip_dump_core);
@@ -1247,8 +1243,10 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
mutex_unlock(&adev->srbm_mutex);
- /* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ /*
+ * Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs.
+ */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
@@ -2343,10 +2341,10 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
-static int gfx_v9_4_3_hw_init(void *handle)
+static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
adev->gfx.cleaner_shader_ptr);
@@ -2367,9 +2365,9 @@ static int gfx_v9_4_3_hw_init(void *handle)
return r;
}
-static int gfx_v9_4_3_hw_fini(void *handle)
+static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -2384,14 +2382,14 @@ static int gfx_v9_4_3_hw_fini(void *handle)
return 0;
}
-static int gfx_v9_4_3_suspend(void *handle)
+static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_4_3_hw_fini(handle);
+ return gfx_v9_4_3_hw_fini(ip_block);
}
-static int gfx_v9_4_3_resume(void *handle)
+static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_4_3_hw_init(handle);
+ return gfx_v9_4_3_hw_init(ip_block);
}
static bool gfx_v9_4_3_is_idle(void *handle)
@@ -2408,24 +2406,24 @@ static bool gfx_v9_4_3_is_idle(void *handle)
return true;
}
-static int gfx_v9_4_3_wait_for_idle(void *handle)
+static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_4_3_is_idle(handle))
+ if (gfx_v9_4_3_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v9_4_3_soft_reset(void *handle)
+static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
@@ -2509,9 +2507,9 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v9_4_3_early_init(void *handle)
+static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
@@ -2527,9 +2525,9 @@ static int gfx_v9_4_3_early_init(void *handle)
return gfx_v9_4_3_init_microcode(adev);
}
-static int gfx_v9_4_3_late_init(void *handle)
+static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3056,9 +3054,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
- if (!adev->debug_exp_resets)
- return;
-
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
@@ -3574,9 +3569,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int r;
- if (!adev->debug_exp_resets)
- return -EINVAL;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -4567,8 +4559,6 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -4579,13 +4569,12 @@ static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_no
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
	/* Header is at index 0, followed by num_nops - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k;
uint32_t xcc_id, xcc_offset, inst_offset;
uint32_t num_xcc, reg, num_inst;
@@ -4643,9 +4632,9 @@ static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v9_4_3_ip_dump(void *handle)
+static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k;
uint32_t num_xcc, reg, num_inst;
uint32_t xcc_id, xcc_offset, inst_offset;
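The bulk of this file's churn is the IP-callback conversion: every hook that used to take an opaque void *handle and cast it back to amdgpu_device now receives its own struct amdgpu_ip_block and reads ip_block->adev. A compilable sketch of the before/after shape, with stand-in types (the real definitions live in the amdgpu headers):

/* stand-ins for the real kernel types */
struct amdgpu_device { int usec_timeout; };
struct amdgpu_ip_block { struct amdgpu_device *adev; };

/* Before: static int gfx_sw_init(void *handle)
 *         { struct amdgpu_device *adev = (struct amdgpu_device *)handle; ... }
 * After: the hook gets its own IP block, so the device is reached without
 * a cast and per-block state stays in scope.
 */
static int gfx_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	(void)adev;                          /* placeholder for real init work */
	return 0;
}

int main(void)
{
	struct amdgpu_device dev = { .usec_timeout = 100000 };
	struct amdgpu_ip_block blk = { .adev = &dev };

	return gfx_sw_init(&blk);
}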
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9784a2892185..697599c46240 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -175,7 +175,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev,
status);
@@ -630,9 +633,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
-static int gmc_v10_0_early_init(void *handle)
+static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v10_0_set_mmhub_funcs(adev);
gmc_v10_0_set_gfxhub_funcs(adev);
@@ -651,9 +654,9 @@ static int gmc_v10_0_early_init(void *handle)
return 0;
}
-static int gmc_v10_0_late_init(void *handle)
+static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -769,10 +772,10 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v10_0_sw_init(void *handle)
+static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfxhub.funcs->init(adev);
@@ -920,9 +923,9 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v10_0_sw_fini(void *handle)
+static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v10_0_gart_fini(adev);
@@ -985,9 +988,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v10_0_hw_init(void *handle)
+static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
@@ -1032,9 +1035,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v10_0_hw_fini(void *handle)
+static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v10_0_gart_disable(adev);
@@ -1053,25 +1056,22 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v10_0_suspend(void *handle)
+static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v10_0_hw_fini(adev);
+ gmc_v10_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v10_0_resume(void *handle)
+static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v10_0_hw_init(adev);
+ r = gmc_v10_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1082,17 +1082,12 @@ static bool gmc_v10_0_is_idle(void *handle)
return true;
}
-static int gmc_v10_0_wait_for_idle(void *handle)
+static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v10.*/
return 0;
}
-static int gmc_v10_0_soft_reset(void *handle)
-{
- return 0;
-}
-
static int gmc_v10_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1154,7 +1149,6 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
.resume = gmc_v10_0_resume,
.is_idle = gmc_v10_0_is_idle,
.wait_for_idle = gmc_v10_0_wait_for_idle,
- .soft_reset = gmc_v10_0_soft_reset,
.set_clockgating_state = gmc_v10_0_set_clockgating_state,
.set_powergating_state = gmc_v10_0_set_powergating_state,
.get_clockgating_state = gmc_v10_0_get_clockgating_state,
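The interrupt-handler change above swaps the !amdgpu_sriov_vf() gate for a check on the value itself: presumably the L2 protection fault status register reads back as 0 on a VF (or when the read yields nothing), so testing status != 0 covers the SR-IOV case and any empty read in one condition. The same hunk repeats in gmc_v11 and gmc_v12 below. A small model of the guard (stand-in function name, hypothetical fault value):

#include <stdio.h>
#include <stdint.h>

static void report_l2_fault(uint32_t status)
{
	if (status == 0)                     /* register unreadable or nothing latched */
		return;
	printf("L2 protection fault status: 0x%08x\n", status);
}

int main(void)
{
	report_l2_fault(0);                  /* e.g. SR-IOV VF: stays silent */
	report_l2_fault(0x00000b01);         /* hypothetical fault encoding */
	return 0;
}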
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 2797fd84432b..f893ab4c14df 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -144,7 +144,10 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
@@ -601,9 +604,9 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
}
-static int gmc_v11_0_early_init(void *handle)
+static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v11_0_set_gfxhub_funcs(adev);
gmc_v11_0_set_mmhub_funcs(adev);
@@ -622,9 +625,9 @@ static int gmc_v11_0_early_init(void *handle)
return 0;
}
-static int gmc_v11_0_late_init(void *handle)
+static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -729,10 +732,10 @@ static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v11_0_sw_init(void *handle)
+static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->init(adev);
@@ -849,9 +852,9 @@ static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v11_0_sw_fini(void *handle)
+static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v11_0_gart_fini(adev);
@@ -908,9 +911,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v11_0_hw_init(void *handle)
+static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
@@ -940,9 +943,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v11_0_hw_fini(void *handle)
+static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
@@ -961,25 +964,22 @@ static int gmc_v11_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v11_0_suspend(void *handle)
+static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v11_0_hw_fini(adev);
+ gmc_v11_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v11_0_resume(void *handle)
+static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v11_0_hw_init(adev);
+ r = gmc_v11_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -990,17 +990,12 @@ static bool gmc_v11_0_is_idle(void *handle)
return true;
}
-static int gmc_v11_0_wait_for_idle(void *handle)
+static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v11.*/
return 0;
}
-static int gmc_v11_0_soft_reset(void *handle)
-{
- return 0;
-}
-
static int gmc_v11_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1041,7 +1036,6 @@ const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
.resume = gmc_v11_0_resume,
.is_idle = gmc_v11_0_is_idle,
.wait_for_idle = gmc_v11_0_wait_for_idle,
- .soft_reset = gmc_v11_0_soft_reset,
.set_clockgating_state = gmc_v11_0_set_clockgating_state,
.set_powergating_state = gmc_v11_0_set_powergating_state,
.get_clockgating_state = gmc_v11_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index edcb5351f8cc..d22b027fd0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -137,7 +137,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
@@ -604,9 +607,9 @@ static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
}
-static int gmc_v12_0_early_init(void *handle)
+static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v12_0_set_gfxhub_funcs(adev);
gmc_v12_0_set_mmhub_funcs(adev);
@@ -624,9 +627,9 @@ static int gmc_v12_0_early_init(void *handle)
return 0;
}
-static int gmc_v12_0_late_init(void *handle)
+static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -731,10 +734,10 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v12_0_sw_init(void *handle)
+static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->init(adev);
@@ -841,9 +844,9 @@ static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v12_0_sw_fini(void *handle)
+static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v12_0_gart_fini(adev);
@@ -894,10 +897,10 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v12_0_hw_init(void *handle)
+static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* The sequence of these two function calls matters.*/
gmc_v12_0_init_golden_registers(adev);
@@ -924,9 +927,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v12_0_hw_fini(void *handle)
+static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
@@ -945,25 +948,22 @@ static int gmc_v12_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v12_0_suspend(void *handle)
+static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v12_0_hw_fini(adev);
+ gmc_v12_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v12_0_resume(void *handle)
+static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v12_0_hw_init(adev);
+ r = gmc_v12_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -974,17 +974,12 @@ static bool gmc_v12_0_is_idle(void *handle)
return true;
}
-static int gmc_v12_0_wait_for_idle(void *handle)
+static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v12.*/
return 0;
}
-static int gmc_v12_0_soft_reset(void *handle)
-{
- return 0;
-}
-
static int gmc_v12_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1025,7 +1020,6 @@ const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
.resume = gmc_v12_0_resume,
.is_idle = gmc_v12_0_is_idle,
.wait_for_idle = gmc_v12_0_wait_for_idle,
- .soft_reset = gmc_v12_0_soft_reset,
.set_clockgating_state = gmc_v12_0_set_clockgating_state,
.set_powergating_state = gmc_v12_0_set_powergating_state,
.get_clockgating_state = gmc_v12_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d36725666b54..ca000b3d1afc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -43,7 +43,7 @@
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v6_0_wait_for_idle(void *handle);
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
@@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
+ struct amdgpu_ip_block *ip_block;
- gmc_v6_0_wait_for_idle((void *)adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v6_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -213,6 +218,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
int i, j;
+ struct amdgpu_ip_block *ip_block;
+
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
@@ -224,7 +231,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -251,7 +262,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
@@ -762,9 +773,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v6_0_early_init(void *handle)
+static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_set_gmc_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
@@ -772,9 +783,9 @@ static int gmc_v6_0_early_init(void *handle)
return 0;
}
-static int gmc_v6_0_late_init(void *handle)
+static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -799,10 +810,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static int gmc_v6_0_sw_init(void *handle)
+static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -876,9 +887,9 @@ static int gmc_v6_0_sw_init(void *handle)
return 0;
}
-static int gmc_v6_0_sw_fini(void *handle)
+static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -889,10 +900,10 @@ static int gmc_v6_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_hw_init(void *handle)
+static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_mc_program(adev);
@@ -914,9 +925,9 @@ static int gmc_v6_0_hw_init(void *handle)
return 0;
}
-static int gmc_v6_0_hw_fini(void *handle)
+static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
@@ -924,21 +935,19 @@ static int gmc_v6_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_suspend(void *handle)
+static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v6_0_hw_fini(adev);
+ gmc_v6_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v6_0_resume(void *handle)
+static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- r = gmc_v6_0_hw_init(adev);
+ r = gmc_v6_0_hw_init(ip_block);
if (r)
return r;
@@ -950,6 +959,7 @@ static int gmc_v6_0_resume(void *handle)
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -959,13 +969,13 @@ static bool gmc_v6_0_is_idle(void *handle)
return true;
}
-static int gmc_v6_0_wait_for_idle(void *handle)
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(handle))
+ if (gmc_v6_0_is_idle(adev))
return 0;
udelay(1);
}
@@ -973,9 +983,10 @@ static int gmc_v6_0_wait_for_idle(void *handle)
}
-static int gmc_v6_0_soft_reset(void *handle)
+static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -992,7 +1003,8 @@ static int gmc_v6_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev);
- if (gmc_v6_0_wait_for_idle(adev))
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1109,8 +1121,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.soft_reset = gmc_v6_0_soft_reset,
.set_clockgating_state = gmc_v6_0_set_clockgating_state,
.set_powergating_state = gmc_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
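gmc_v6 shows the other half of the conversion: internal callers such as gmc_v6_0_mc_stop() and gmc_v6_0_mc_program() can no longer pass adev straight into the converted wait_for_idle(), so they first look up their own IP block with amdgpu_device_ip_get_ip_block() and bail out if it is missing. A sketch of that lookup with hypothetical stand-in types:

#include <stddef.h>

enum ip_type { IP_GMC, IP_GFX, IP_MAX };             /* stand-ins */
struct ip_block { enum ip_type type; };
struct device { struct ip_block blocks[IP_MAX]; int num_blocks; };

static struct ip_block *get_ip_block(struct device *dev, enum ip_type type)
{
	for (int i = 0; i < dev->num_blocks; i++)
		if (dev->blocks[i].type == type)
			return &dev->blocks[i];
	return NULL;                         /* callers bail out here, as mc_stop() now does */
}

int main(void)
{
	struct device dev = { .blocks = { { IP_GMC }, { IP_GFX } }, .num_blocks = 2 };

	return get_ip_block(&dev, IP_GMC) ? 0 : 1;
}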
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 994432fb57ea..07f45f1a503a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -52,7 +52,7 @@
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v7_0_wait_for_idle(void *handle);
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
@@ -921,9 +921,9 @@ static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v7_0_early_init(void *handle)
+static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v7_0_set_gmc_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
@@ -940,9 +940,9 @@ static int gmc_v7_0_early_init(void *handle)
return 0;
}
-static int gmc_v7_0_late_init(void *handle)
+static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -968,10 +968,10 @@ static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static int gmc_v7_0_sw_init(void *handle)
+static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -1060,9 +1060,9 @@ static int gmc_v7_0_sw_init(void *handle)
return 0;
}
-static int gmc_v7_0_sw_fini(void *handle)
+static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1074,10 +1074,10 @@ static int gmc_v7_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v7_0_hw_init(void *handle)
+static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v7_0_init_golden_registers(adev);
@@ -1101,9 +1101,9 @@ static int gmc_v7_0_hw_init(void *handle)
return 0;
}
-static int gmc_v7_0_hw_fini(void *handle)
+static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
@@ -1111,25 +1111,22 @@ static int gmc_v7_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v7_0_suspend(void *handle)
+static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v7_0_hw_fini(adev);
+ gmc_v7_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v7_0_resume(void *handle)
+static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v7_0_hw_init(adev);
+ r = gmc_v7_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1146,11 +1143,11 @@ static bool gmc_v7_0_is_idle(void *handle)
return true;
}
-static int gmc_v7_0_wait_for_idle(void *handle)
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -1167,9 +1164,9 @@ static int gmc_v7_0_wait_for_idle(void *handle)
}
-static int gmc_v7_0_soft_reset(void *handle)
+static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1351,8 +1348,6 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.soft_reset = gmc_v7_0_soft_reset,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 86488c052f82..12d5967ecd45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -53,7 +53,7 @@
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v8_0_wait_for_idle(void *handle);
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
+ struct amdgpu_ip_block *ip_block;
- gmc_v8_0_wait_for_idle(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v8_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -426,6 +431,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
u32 tmp;
int i, j;
@@ -439,7 +445,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v8_0_wait_for_idle((void *)adev))
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -474,7 +484,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v8_0_wait_for_idle((void *)adev))
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -1027,9 +1037,9 @@ static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v8_0_early_init(void *handle)
+static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v8_0_set_gmc_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
@@ -1046,9 +1056,9 @@ static int gmc_v8_0_early_init(void *handle)
return 0;
}
-static int gmc_v8_0_late_init(void *handle)
+static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -1076,10 +1086,10 @@ static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
#define mmMC_SEQ_MISC0_FIJI 0xA71
-static int gmc_v8_0_sw_init(void *handle)
+static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -1173,9 +1183,9 @@ static int gmc_v8_0_sw_init(void *handle)
return 0;
}
-static int gmc_v8_0_sw_fini(void *handle)
+static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1187,10 +1197,10 @@ static int gmc_v8_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v8_0_hw_init(void *handle)
+static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v8_0_init_golden_registers(adev);
@@ -1222,9 +1232,9 @@ static int gmc_v8_0_hw_init(void *handle)
return 0;
}
-static int gmc_v8_0_hw_fini(void *handle)
+static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v8_0_gart_disable(adev);
@@ -1232,25 +1242,22 @@ static int gmc_v8_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v8_0_suspend(void *handle)
+static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v8_0_hw_fini(adev);
+ gmc_v8_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v8_0_resume(void *handle)
+static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v8_0_hw_init(adev);
+ r = gmc_v8_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1267,11 +1274,11 @@ static bool gmc_v8_0_is_idle(void *handle)
return true;
}
-static int gmc_v8_0_wait_for_idle(void *handle)
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -1289,10 +1296,10 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static bool gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
@@ -1316,23 +1323,23 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
return false;
}
-static int gmc_v8_0_pre_soft_reset(void *handle)
+static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->gmc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev);
- if (gmc_v8_0_wait_for_idle(adev))
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
return 0;
}
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->gmc.srbm_soft_reset)
@@ -1361,9 +1368,9 @@ static int gmc_v8_0_soft_reset(void *handle)
return 0;
}
-static int gmc_v8_0_post_soft_reset(void *handle)
+static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->gmc.srbm_soft_reset)
return 0;
@@ -1715,8 +1722,6 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
.get_clockgating_state = gmc_v8_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
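gmc_v8 is the one block in this series with the full staged reset flow, and all four hooks (check_soft_reset, pre_soft_reset, soft_reset, post_soft_reset) move to the ip_block signature together. They communicate through the cached adev->gmc.srbm_soft_reset mask: the check phase latches it, and the later stages no-op when it is zero. A compilable sketch of that contract (field names and reset bits are illustrative, not the real register layout):

#include <stdio.h>
#include <stdint.h>

struct gmc { uint32_t srbm_soft_reset; };            /* stand-in for adev->gmc */

static int check_soft_reset(struct gmc *gmc, uint32_t srbm_status)
{
	gmc->srbm_soft_reset = (srbm_status & 0x1) ? 0x4000 : 0; /* hypothetical bits */
	return gmc->srbm_soft_reset != 0;
}

static void pre_soft_reset(struct gmc *gmc)
{
	if (!gmc->srbm_soft_reset)
		return;
	puts("stop MC, wait for idle");
}

static void soft_reset(struct gmc *gmc)
{
	if (!gmc->srbm_soft_reset)
		return;
	printf("write SRBM_SOFT_RESET = 0x%x, then clear it\n", gmc->srbm_soft_reset);
}

static void post_soft_reset(struct gmc *gmc)
{
	if (!gmc->srbm_soft_reset)
		return;
	puts("restart MC");
}

int main(void)
{
	struct gmc gmc = {0};

	if (check_soft_reset(&gmc, 0x1)) {
		pre_soft_reset(&gmc);
		soft_reset(&gmc);
		post_soft_reset(&gmc);
	}
	return 0;
}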
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c76ac0dfe572..f43ded8a0aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -672,6 +672,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
(amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
return 0;
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (!status)
+ return 0;
+
if (!amdgpu_sriov_vf(adev))
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -1387,14 +1393,44 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
}
static enum amdgpu_memory_partition
+gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+
+ return AMDGPU_NPS1_PARTITION_MODE;
+}
+
+static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
- return AMDGPU_NPS1_PARTITION_MODE;
+ return gmc_v9_0_query_vf_memory_partition(adev);
return gmc_v9_0_get_memory_partition(adev, NULL);
}
+static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
+ adev->nbio.funcs->is_nps_switch_requested(adev)) {
+ adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
+ return true;
+ }
+
+ return false;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -1406,6 +1442,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
+ .need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -1545,9 +1583,31 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
adev->gmc.xgmi.ras = &xgmi_ras;
}
-static int gmc_v9_0_early_init(void *handle)
+static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->gmc.supported_nps_modes = 0;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return;
+
+	/* TODO: Also check that the PSP version supports NPS switch;
+	 * otherwise keep supported modes as 0.
+	 */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gmc.supported_nps_modes =
+ BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
/*
* 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
@@ -1601,9 +1661,9 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -1900,6 +1960,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
switch (mode) {
case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
case AMDGPU_NPS1_PARTITION_MODE:
adev->gmc.num_mem_partitions = 1;
break;
@@ -1919,7 +1981,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
/* Use NPS range info, if populated */
r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- adev->gmc.num_mem_partitions);
+ &adev->gmc.num_mem_partitions);
if (!r) {
l = 0;
for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
@@ -1929,6 +1991,11 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
}
} else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_err(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1");
+ adev->gmc.num_mem_partitions = 1;
+ }
/* Fallback to sw based calculation */
size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
size /= adev->gmc.num_mem_partitions;
@@ -1987,10 +2054,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
adev->gmc.vram_width = 128 * 64;
}
-static int gmc_v9_0_sw_init(void *handle)
+static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned long inst_mask = adev->aid_mask;
adev->gfxhub.funcs->init(adev);
@@ -2165,6 +2232,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ gmc_v9_0_init_nps_details(adev);
/*
* number of VMs
* VMID 0 is reserved for System
@@ -2198,9 +2266,9 @@ static int gmc_v9_0_sw_init(void *handle)
return 0;
}
-static int gmc_v9_0_sw_fini(void *handle)
+static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
@@ -2308,9 +2376,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_hw_init(void *handle)
+static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool value;
int i, r;
@@ -2393,9 +2461,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v9_0_hw_fini(void *handle)
+static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v9_0_gart_disable(adev);
@@ -2413,32 +2481,44 @@ static int gmc_v9_0_hw_fini(void *handle)
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
- amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ /*
+ * For minimal init, late_init is not called, hence VM fault/RAS irqs
+ * are not enabled.
+ */
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- if (adev->gmc.ecc_irq.funcs &&
- amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ }
return 0;
}
-static int gmc_v9_0_suspend(void *handle)
+static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gmc_v9_0_hw_fini(adev);
+ return gmc_v9_0_hw_fini(ip_block);
}
-static int gmc_v9_0_resume(void *handle)
+static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v9_0_hw_init(adev);
+ /* If a reset is done for NPS mode switch, read the memory range
+ * information again.
+ */
+ if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
+ gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
+ }
+
+ r = gmc_v9_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -2449,13 +2529,13 @@ static bool gmc_v9_0_is_idle(void *handle)
return true;
}
-static int gmc_v9_0_wait_for_idle(void *handle)
+static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v9.*/
return 0;
}
-static int gmc_v9_0_soft_reset(void *handle)
+static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX for emulation.*/
return 0;
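Beyond the signature conversion, gmc_v9 gains the NPS plumbing: on a VF the partition mode is inferred from the discovered partition count (falling back to NPS1 for unrecognized counts), bare-metal GC 9.4.3/9.4.4 advertise NPS1 and NPS4 through a supported_nps_modes bitmask, and resume re-reads the memory ranges when AMDGPU_GMC_INIT_RESET_NPS is flagged. A sketch of the count-to-mode mapping and the bitmask check (enum values are stand-ins mirroring amdgpu_memory_partition):

#include <assert.h>

enum mem_partition { UNKNOWN_MODE = 0, NPS1 = 1, NPS2 = 2, NPS4 = 4 };
#define BIT(n) (1u << (n))

/* VF path added above: infer the mode from the discovered partition count,
 * defaulting conservatively to NPS1 when the count is unrecognized. */
static enum mem_partition query_vf_mode(int num_mem_partitions)
{
	switch (num_mem_partitions) {
	case 0:  return UNKNOWN_MODE;
	case 1:  return NPS1;
	case 2:  return NPS2;
	case 4:  return NPS4;
	default: return NPS1;
	}
}

int main(void)
{
	/* supported_nps_modes as built for GC 9.4.3/9.4.4 bare metal */
	unsigned supported = BIT(NPS1) | BIT(NPS4);

	assert(query_vf_mode(4) == NPS4);
	assert(supported & BIT(query_vf_mode(4)));   /* NPS4 switch allowed */
	assert(!(supported & BIT(NPS2)));            /* NPS2 not offered */
	return 0;
}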
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 07984f7c3ae7..7f45e93c0397 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -273,9 +273,9 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int iceland_ih_early_init(void *handle)
+static int iceland_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -287,10 +287,10 @@ static int iceland_ih_early_init(void *handle)
return 0;
}
-static int iceland_ih_sw_init(void *handle)
+static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -301,9 +301,9 @@ static int iceland_ih_sw_init(void *handle)
return r;
}
-static int iceland_ih_sw_fini(void *handle)
+static int iceland_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -311,34 +311,28 @@ static int iceland_ih_sw_fini(void *handle)
return 0;
}
-static int iceland_ih_hw_init(void *handle)
+static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return iceland_ih_irq_init(adev);
}
-static int iceland_ih_hw_fini(void *handle)
+static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_ih_irq_disable(adev);
+ iceland_ih_irq_disable(ip_block->adev);
return 0;
}
-static int iceland_ih_suspend(void *handle)
+static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return iceland_ih_hw_fini(adev);
+ return iceland_ih_hw_fini(ip_block);
}
-static int iceland_ih_resume(void *handle)
+static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return iceland_ih_hw_init(adev);
+ return iceland_ih_hw_init(ip_block);
}
static bool iceland_ih_is_idle(void *handle)
@@ -352,11 +346,11 @@ static bool iceland_ih_is_idle(void *handle)
return true;
}
-static int iceland_ih_wait_for_idle(void *handle)
+static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -368,10 +362,10 @@ static int iceland_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int iceland_ih_soft_reset(void *handle)
+static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -413,7 +407,6 @@ static int iceland_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
- .late_init = NULL,
.sw_init = iceland_ih_sw_init,
.sw_fini = iceland_ih_sw_fini,
.hw_init = iceland_ih_hw_init,
@@ -425,8 +418,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.soft_reset = iceland_ih_soft_reset,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
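The IH patches also drop explicit NULL members (.late_init, .dump_ip_state, .print_ip_state) from the amd_ip_funcs tables. That is safe because members omitted from a designated initializer of a static-storage object are zero-initialized, so the pointers stay NULL and the optional-callback checks behave exactly as before. A minimal demonstration with stand-in types:

#include <assert.h>
#include <stddef.h>

struct ip_funcs {
	int (*early_init)(void);
	int (*late_init)(void);              /* intentionally not initialized */
};

static int early(void) { return 0; }

static const struct ip_funcs funcs = {
	.early_init = early,
	/* .late_init omitted: implicitly NULL */
};

int main(void)
{
	assert(funcs.late_init == NULL);     /* optional-hook check still works */
	return funcs.early_init();
}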
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 18a761d6ef33..38f953fd65d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -559,19 +559,19 @@ static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
}
-static int ih_v6_0_early_init(void *handle)
+static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_0_set_interrupt_funcs(adev);
ih_v6_0_set_self_irq_funcs(adev);
return 0;
}
-static int ih_v6_0_sw_init(void *handle)
+static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -614,19 +614,19 @@ static int ih_v6_0_sw_init(void *handle)
return r;
}
-static int ih_v6_0_sw_fini(void *handle)
+static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v6_0_hw_init(void *handle)
+static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v6_0_irq_init(adev);
if (r)
@@ -635,27 +635,21 @@ static int ih_v6_0_hw_init(void *handle)
return 0;
}
-static int ih_v6_0_hw_fini(void *handle)
+static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_0_irq_disable(adev);
+ ih_v6_0_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v6_0_suspend(void *handle)
+static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_0_hw_fini(adev);
+ return ih_v6_0_hw_fini(ip_block);
}
-static int ih_v6_0_resume(void *handle)
+static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_0_hw_init(adev);
+ return ih_v6_0_hw_init(ip_block);
}
static bool ih_v6_0_is_idle(void *handle)
@@ -664,13 +658,13 @@ static bool ih_v6_0_is_idle(void *handle)
return true;
}
-static int ih_v6_0_wait_for_idle(void *handle)
+static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v6_0_soft_reset(void *handle)
+static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -785,7 +779,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.name = "ih_v6_0",
.early_init = ih_v6_0_early_init,
- .late_init = NULL,
.sw_init = ih_v6_0_sw_init,
.sw_fini = ih_v6_0_sw_fini,
.hw_init = ih_v6_0_hw_init,
@@ -798,8 +791,6 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.set_clockgating_state = ih_v6_0_set_clockgating_state,
.set_powergating_state = ih_v6_0_set_powergating_state,
.get_clockgating_state = ih_v6_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
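The dropped .late_init = NULL, .dump_ip_state = NULL and .print_ip_state = NULL lines here and in the other ip_funcs tables are cleanups, not behavior changes: with C designated initializers, any member that is not named is zero-initialized, so the omitted callbacks end up NULL either way. A small standalone check of that rule (the struct is a stand-in, not the real amd_ip_funcs):

#include <stdio.h>

/* stand-in for amd_ip_funcs: two optional callbacks */
struct ip_funcs {
	int (*early_init)(void *);
	int (*late_init)(void *);	/* intentionally not named below */
};

static int demo_early_init(void *ctx) { (void)ctx; return 0; }

/* .late_init is omitted, so it is implicitly initialized to NULL */
static const struct ip_funcs demo_funcs = {
	.early_init = demo_early_init,
};

int main(void)
{
	/* prints "late_init is NULL": omitted members are zeroed */
	printf("late_init is %s\n", demo_funcs.late_init ? "set" : "NULL");
	return 0;
}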
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 2e0469feca1e..61381e0c3795 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -532,9 +532,9 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs;
}
-static int ih_v6_1_early_init(void *handle)
+static int ih_v6_1_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -547,10 +547,10 @@ static int ih_v6_1_early_init(void *handle)
return 0;
}
-static int ih_v6_1_sw_init(void *handle)
+static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -593,19 +593,19 @@ static int ih_v6_1_sw_init(void *handle)
return r;
}
-static int ih_v6_1_sw_fini(void *handle)
+static int ih_v6_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v6_1_hw_init(void *handle)
+static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v6_1_irq_init(adev);
if (r)
@@ -614,27 +614,21 @@ static int ih_v6_1_hw_init(void *handle)
return 0;
}
-static int ih_v6_1_hw_fini(void *handle)
+static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_1_irq_disable(adev);
+ ih_v6_1_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v6_1_suspend(void *handle)
+static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_1_hw_fini(adev);
+ return ih_v6_1_hw_fini(ip_block);
}
-static int ih_v6_1_resume(void *handle)
+static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_1_hw_init(adev);
+ return ih_v6_1_hw_init(ip_block);
}
static bool ih_v6_1_is_idle(void *handle)
@@ -643,13 +637,13 @@ static bool ih_v6_1_is_idle(void *handle)
return true;
}
-static int ih_v6_1_wait_for_idle(void *handle)
+static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v6_1_soft_reset(void *handle)
+static int ih_v6_1_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -768,7 +762,6 @@ static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.name = "ih_v6_1",
.early_init = ih_v6_1_early_init,
- .late_init = NULL,
.sw_init = ih_v6_1_sw_init,
.sw_fini = ih_v6_1_sw_fini,
.hw_init = ih_v6_1_hw_init,
@@ -781,8 +774,6 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.set_clockgating_state = ih_v6_1_set_clockgating_state,
.set_powergating_state = ih_v6_1_set_powergating_state,
.get_clockgating_state = ih_v6_1_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 6852081fcff2..d2428cf5d385 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -528,19 +528,19 @@ static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
}
-static int ih_v7_0_early_init(void *handle)
+static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v7_0_set_interrupt_funcs(adev);
ih_v7_0_set_self_irq_funcs(adev);
return 0;
}
-static int ih_v7_0_sw_init(void *handle)
+static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -583,19 +583,19 @@ static int ih_v7_0_sw_init(void *handle)
return r;
}
-static int ih_v7_0_sw_fini(void *handle)
+static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v7_0_hw_init(void *handle)
+static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v7_0_irq_init(adev);
if (r)
@@ -604,27 +604,21 @@ static int ih_v7_0_hw_init(void *handle)
return 0;
}
-static int ih_v7_0_hw_fini(void *handle)
+static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v7_0_irq_disable(adev);
+ ih_v7_0_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v7_0_suspend(void *handle)
+static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v7_0_hw_fini(adev);
+ return ih_v7_0_hw_fini(ip_block);
}
-static int ih_v7_0_resume(void *handle)
+static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v7_0_hw_init(adev);
+ return ih_v7_0_hw_init(ip_block);
}
static bool ih_v7_0_is_idle(void *handle)
@@ -633,13 +627,13 @@ static bool ih_v7_0_is_idle(void *handle)
return true;
}
-static int ih_v7_0_wait_for_idle(void *handle)
+static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v7_0_soft_reset(void *handle)
+static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -758,7 +752,6 @@ static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.name = "ih_v7_0",
.early_init = ih_v7_0_early_init,
- .late_init = NULL,
.sw_init = ih_v7_0_sw_init,
.sw_fini = ih_v7_0_sw_fini,
.hw_init = ih_v7_0_hw_init,
@@ -771,8 +764,6 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.set_clockgating_state = ih_v7_0_set_clockgating_state,
.set_powergating_state = ih_v7_0_set_powergating_state,
.get_clockgating_state = ih_v7_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 6e0e88076224..03b8b7cd5229 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -458,13 +458,13 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
/**
* jpeg_v1_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-int jpeg_v1_0_early_init(void *handle)
+int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -478,12 +478,12 @@ int jpeg_v1_0_early_init(void *handle)
/**
* jpeg_v1_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-int jpeg_v1_0_sw_init(void *handle)
+int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -509,13 +509,13 @@ int jpeg_v1_0_sw_init(void *handle)
/**
* jpeg_v1_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG free up sw allocation
*/
-void jpeg_v1_0_sw_fini(void *handle)
+void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_ring_fini(adev->jpeg.inst->ring_dec);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
index 9654d22e0376..097328635083 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -24,9 +24,9 @@
#ifndef __JPEG_V1_0_H__
#define __JPEG_V1_0_H__
-int jpeg_v1_0_early_init(void *handle);
-int jpeg_v1_0_sw_init(void *handle);
-void jpeg_v1_0_sw_fini(void *handle);
+int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block);
+int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block);
+void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block);
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
#define JPEG_V1_REG_RANGE_START 0x8000
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 41c0f8750dc1..d6823fb45d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -41,13 +41,13 @@ static int jpeg_v2_0_set_powergating_state(void *handle,
/**
* jpeg_v2_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v2_0_early_init(void *handle)
+static int jpeg_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -61,13 +61,13 @@ static int jpeg_v2_0_early_init(void *handle)
/**
* jpeg_v2_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v2_0_sw_init(void *handle)
+static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -104,14 +104,14 @@ static int jpeg_v2_0_sw_init(void *handle)
/**
* jpeg_v2_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v2_0_sw_fini(void *handle)
+static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_jpeg_suspend(adev);
if (r)
@@ -125,12 +125,12 @@ static int jpeg_v2_0_sw_fini(void *handle)
/**
* jpeg_v2_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v2_0_hw_init(void *handle)
+static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -142,13 +142,13 @@ static int jpeg_v2_0_hw_init(void *handle)
/**
* jpeg_v2_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_0_hw_fini(void *handle)
+static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -162,20 +162,19 @@ static int jpeg_v2_0_hw_fini(void *handle)
/**
* jpeg_v2_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v2_0_suspend(void *handle)
+static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v2_0_hw_fini(adev);
+ r = jpeg_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -183,20 +182,19 @@ static int jpeg_v2_0_suspend(void *handle)
/**
* jpeg_v2_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v2_0_resume(void *handle)
+static int jpeg_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v2_0_hw_init(adev);
+ r = jpeg_v2_0_hw_init(ip_block);
return r;
}
@@ -666,9 +664,9 @@ static bool jpeg_v2_0_is_idle(void *handle)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v2_0_wait_for_idle(void *handle)
+static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
@@ -744,7 +742,6 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.name = "jpeg_v2_0",
.early_init = jpeg_v2_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_0_sw_init,
.sw_fini = jpeg_v2_0_sw_fini,
.hw_init = jpeg_v2_0_hw_init,
@@ -753,14 +750,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.resume = jpeg_v2_0_resume,
.is_idle = jpeg_v2_0_is_idle,
.wait_for_idle = jpeg_v2_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
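One side effect of the typed parameter shows up in every jpeg and ih suspend/resume pair: suspend forwards ip_block straight into hw_fini() and only dereferences ip_block->adev for the amdgpu_jpeg_suspend() call, with resume mirroring the order (restore state first, then hw_init()). A condensed standalone sketch under hypothetical demo_* names:

#include <stdio.h>

struct demo_adev { int powered; };
struct demo_ip_block { struct demo_adev *adev; };

static int demo_hw_fini(struct demo_ip_block *ip_block)
{
	ip_block->adev->powered = 0;	/* stop the block */
	return 0;
}

static int demo_fw_suspend(struct demo_adev *adev)
{
	(void)adev;			/* firmware state would be saved here */
	return 0;
}

/* suspend = hardware teardown first, then firmware/software state save;
 * ip_block is forwarded as-is, only the second call needs ->adev */
static int demo_suspend(struct demo_ip_block *ip_block)
{
	int r = demo_hw_fini(ip_block);

	if (r)
		return r;
	return demo_fw_suspend(ip_block->adev);
}

int main(void)
{
	struct demo_adev adev = { .powered = 1 };
	struct demo_ip_block block = { .adev = &adev };

	printf("suspend=%d powered=%d\n", demo_suspend(&block), adev.powered);
	return 0;
}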
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index eedb9a829d95..5063a38801d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -50,13 +50,13 @@ static int amdgpu_ih_clientid_jpeg[] = {
/**
* jpeg_v2_5_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v2_5_early_init(void *handle)
+static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 harvest;
int i;
@@ -81,15 +81,15 @@ static int jpeg_v2_5_early_init(void *handle)
/**
* jpeg_v2_5_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v2_5_sw_init(void *handle)
+static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
@@ -153,14 +153,14 @@ static int jpeg_v2_5_sw_init(void *handle)
/**
* jpeg_v2_5_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v2_5_sw_fini(void *handle)
+static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_jpeg_suspend(adev);
if (r)
@@ -174,12 +174,12 @@ static int jpeg_v2_5_sw_fini(void *handle)
/**
* jpeg_v2_5_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v2_5_hw_init(void *handle)
+static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -202,13 +202,13 @@ static int jpeg_v2_5_hw_init(void *handle)
/**
* jpeg_v2_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_5_hw_fini(void *handle)
+static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -231,20 +231,19 @@ static int jpeg_v2_5_hw_fini(void *handle)
/**
* jpeg_v2_5_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v2_5_suspend(void *handle)
+static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v2_5_hw_fini(adev);
+ r = jpeg_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -252,20 +251,19 @@ static int jpeg_v2_5_suspend(void *handle)
/**
* jpeg_v2_5_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v2_5_resume(void *handle)
+static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v2_5_hw_init(adev);
+ r = jpeg_v2_5_hw_init(ip_block);
return r;
}
@@ -501,9 +499,9 @@ static bool jpeg_v2_5_is_idle(void *handle)
return ret;
}
-static int jpeg_v2_5_wait_for_idle(void *handle)
+static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -615,7 +613,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.name = "jpeg_v2_5",
.early_init = jpeg_v2_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_5_sw_init,
.sw_fini = jpeg_v2_5_sw_fini,
.hw_init = jpeg_v2_5_hw_init,
@@ -624,20 +621,13 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.resume = jpeg_v2_5_resume,
.is_idle = jpeg_v2_5_is_idle,
.wait_for_idle = jpeg_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.name = "jpeg_v2_6",
.early_init = jpeg_v2_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_5_sw_init,
.sw_fini = jpeg_v2_5_sw_fini,
.hw_init = jpeg_v2_5_hw_init,
@@ -646,14 +636,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.resume = jpeg_v2_5_resume,
.is_idle = jpeg_v2_5_is_idle,
.wait_for_idle = jpeg_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index b1e7fd25afbc..10adbb7cbf53 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -42,13 +42,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
/**
* jpeg_v3_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v3_0_early_init(void *handle)
+static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 harvest;
@@ -75,13 +75,13 @@ static int jpeg_v3_0_early_init(void *handle)
/**
* jpeg_v3_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v3_0_sw_init(void *handle)
+static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -118,13 +118,13 @@ static int jpeg_v3_0_sw_init(void *handle)
/**
* jpeg_v3_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v3_0_sw_fini(void *handle)
+static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -139,12 +139,12 @@ static int jpeg_v3_0_sw_fini(void *handle)
/**
* jpeg_v3_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v3_0_hw_init(void *handle)
+static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -156,13 +156,13 @@ static int jpeg_v3_0_hw_init(void *handle)
/**
* jpeg_v3_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v3_0_hw_fini(void *handle)
+static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -176,20 +176,19 @@ static int jpeg_v3_0_hw_fini(void *handle)
/**
* jpeg_v3_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v3_0_suspend(void *handle)
+static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v3_0_hw_fini(adev);
+ r = jpeg_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -197,20 +196,19 @@ static int jpeg_v3_0_suspend(void *handle)
/**
* jpeg_v3_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v3_0_resume(void *handle)
+static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v3_0_hw_init(adev);
+ r = jpeg_v3_0_hw_init(ip_block);
return r;
}
@@ -459,9 +457,9 @@ static bool jpeg_v3_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v3_0_wait_for_idle(void *handle)
+static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
@@ -535,7 +533,6 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.name = "jpeg_v3_0",
.early_init = jpeg_v3_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v3_0_sw_init,
.sw_fini = jpeg_v3_0_sw_fini,
.hw_init = jpeg_v3_0_hw_init,
@@ -544,14 +541,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.resume = jpeg_v3_0_resume,
.is_idle = jpeg_v3_0_is_idle,
.wait_for_idle = jpeg_v3_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 6c5c1a68a9b7..89953c0f5f1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -48,13 +48,13 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
/**
* jpeg_v4_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_early_init(void *handle)
+static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
@@ -70,13 +70,13 @@ static int jpeg_v4_0_early_init(void *handle)
/**
* jpeg_v4_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_sw_init(void *handle)
+static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -130,13 +130,13 @@ static int jpeg_v4_0_sw_init(void *handle)
/**
* jpeg_v4_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_sw_fini(void *handle)
+static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -151,12 +151,12 @@ static int jpeg_v4_0_sw_fini(void *handle)
/**
* jpeg_v4_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_hw_init(void *handle)
+static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
@@ -187,13 +187,13 @@ static int jpeg_v4_0_hw_init(void *handle)
/**
* jpeg_v4_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_hw_fini(void *handle)
+static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (!amdgpu_sriov_vf(adev)) {
@@ -210,20 +210,19 @@ static int jpeg_v4_0_hw_fini(void *handle)
/**
* jpeg_v4_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_suspend(void *handle)
+static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_hw_fini(adev);
+ r = jpeg_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -231,20 +230,19 @@ static int jpeg_v4_0_suspend(void *handle)
/**
* jpeg_v4_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_resume(void *handle)
+static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_hw_init(adev);
+ r = jpeg_v4_0_hw_init(ip_block);
return r;
}
@@ -621,9 +619,9 @@ static bool jpeg_v4_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_wait_for_idle(void *handle)
+static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
@@ -702,7 +700,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.name = "jpeg_v4_0",
.early_init = jpeg_v4_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_sw_init,
.sw_fini = jpeg_v4_0_sw_fini,
.hw_init = jpeg_v4_0_hw_init,
@@ -711,14 +708,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.resume = jpeg_v4_0_resume,
.is_idle = jpeg_v4_0_is_idle,
.wait_for_idle = jpeg_v4_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 86958cb2c2ab..6917e4a8e96a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -68,13 +68,13 @@ static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
/**
* jpeg_v4_0_3_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_3_early_init(void *handle)
+static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
@@ -88,13 +88,13 @@ static int jpeg_v4_0_3_early_init(void *handle)
/**
* jpeg_v4_0_3_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_3_sw_init(void *handle)
+static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r, jpeg_inst;
@@ -165,13 +165,13 @@ static int jpeg_v4_0_3_sw_init(void *handle)
/**
* jpeg_v4_0_3_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_3_sw_fini(void *handle)
+static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -299,12 +299,12 @@ static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev)
/**
* jpeg_v4_0_3_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_3_hw_init(void *handle)
+static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r, jpeg_inst;
@@ -358,13 +358,13 @@ static int jpeg_v4_0_3_hw_init(void *handle)
/**
* jpeg_v4_0_3_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_3_hw_fini(void *handle)
+static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
cancel_delayed_work_sync(&adev->jpeg.idle_work);
@@ -380,20 +380,19 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
/**
* jpeg_v4_0_3_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_3_suspend(void *handle)
+static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_3_hw_fini(adev);
+ r = jpeg_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -401,20 +400,19 @@ static int jpeg_v4_0_3_suspend(void *handle)
/**
* jpeg_v4_0_3_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_3_resume(void *handle)
+static int jpeg_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_3_hw_init(adev);
+ r = jpeg_v4_0_3_hw_init(ip_block);
return r;
}
@@ -674,11 +672,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
- }
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80004000);
+ amdgpu_ring_write(ring,
+ PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
+ 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80004000);
+ }
}
/**
@@ -694,11 +693,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04);
- }
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00004000);
+ amdgpu_ring_write(ring,
+ PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
+ 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00004000);
+ }
}
/**
@@ -743,14 +743,6 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
@@ -929,9 +921,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_3_wait_for_idle(void *handle)
+static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
int i, j;
@@ -1058,7 +1050,6 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.name = "jpeg_v4_0_3",
.early_init = jpeg_v4_0_3_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_3_sw_init,
.sw_fini = jpeg_v4_0_3_sw_fini,
.hw_init = jpeg_v4_0_3_hw_init,
@@ -1067,14 +1058,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.resume = jpeg_v4_0_3_resume,
.is_idle = jpeg_v4_0_3_is_idle,
.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
@@ -1088,7 +1073,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
- 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
+ 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
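Two details in the jpeg_v4_0_3 hunks above are easy to miss. First, insert_start/insert_end now emit the JRBC_DEC_EXTERNAL_REG_WRITE_ADDR pair only under the same condition as the PCTL0_MMHUB_DEEPSLEEP_IB write, rather than unconditionally. Second, emit_fence drops two register writes, and since each PACKETJ register write costs two ring dwords (one packet header, one payload), the per-fence reservation in emit_frame_size shrinks by four dwords, from 22 to 18. A trivial standalone check of that bookkeeping:

#include <assert.h>

/* each PACKETJ register write = 1 header dword + 1 payload dword */
#define DWORDS_PER_REG_WRITE 2

int main(void)
{
	int old_fence_dwords = 22;
	int removed_reg_writes = 2;	/* the 0x3fbc write + the external-reg write */
	int new_fence_dwords =
		old_fence_dwords - removed_reg_writes * DWORDS_PER_REG_WRITE;

	assert(new_fence_dwords == 18);	/* matches the updated frame size */
	return 0;
}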
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 44eeed445ea9..f3cce523f3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -61,13 +61,13 @@ static int amdgpu_ih_clientid_jpeg[] = {
/**
* jpeg_v4_0_5_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_5_early_init(void *handle)
+static int jpeg_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
@@ -94,13 +94,13 @@ static int jpeg_v4_0_5_early_init(void *handle)
/**
* jpeg_v4_0_5_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_5_sw_init(void *handle)
+static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r, i;
@@ -159,13 +159,13 @@ static int jpeg_v4_0_5_sw_init(void *handle)
/**
* jpeg_v4_0_5_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_5_sw_fini(void *handle)
+static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -180,12 +180,12 @@ static int jpeg_v4_0_5_sw_fini(void *handle)
/**
* jpeg_v4_0_5_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_5_hw_init(void *handle)
+static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r = 0;
@@ -210,13 +210,13 @@ static int jpeg_v4_0_5_hw_init(void *handle)
/**
* jpeg_v4_0_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_5_hw_fini(void *handle)
+static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -237,20 +237,19 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
/**
* jpeg_v4_0_5_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_5_suspend(void *handle)
+static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_5_hw_fini(adev);
+ r = jpeg_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -258,20 +257,19 @@ static int jpeg_v4_0_5_suspend(void *handle)
/**
* jpeg_v4_0_5_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_5_resume(void *handle)
+static int jpeg_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_5_hw_init(adev);
+ r = jpeg_v4_0_5_hw_init(ip_block);
return r;
}
@@ -637,9 +635,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_5_wait_for_idle(void *handle)
+static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -743,7 +741,6 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_5_sw_init,
.sw_fini = jpeg_v4_0_5_sw_fini,
.hw_init = jpeg_v4_0_5_hw_init,
@@ -752,14 +749,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.resume = jpeg_v4_0_5_resume,
.is_idle = jpeg_v4_0_5_is_idle,
.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d662aa841f97..06840d1dae79 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -42,13 +42,13 @@ static int jpeg_v5_0_0_set_powergating_state(void *handle,
/**
* jpeg_v5_0_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v5_0_0_early_init(void *handle)
+static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -62,13 +62,13 @@ static int jpeg_v5_0_0_early_init(void *handle)
/**
* jpeg_v5_0_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v5_0_0_sw_init(void *handle)
+static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -106,13 +106,13 @@ static int jpeg_v5_0_0_sw_init(void *handle)
/**
* jpeg_v5_0_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v5_0_0_sw_fini(void *handle)
+static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -127,12 +127,12 @@ static int jpeg_v5_0_0_sw_fini(void *handle)
/**
* jpeg_v5_0_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v5_0_0_hw_init(void *handle)
+static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
@@ -153,13 +153,13 @@ static int jpeg_v5_0_0_hw_init(void *handle)
/**
* jpeg_v5_0_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v5_0_0_hw_fini(void *handle)
+static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -173,20 +173,19 @@ static int jpeg_v5_0_0_hw_fini(void *handle)
/**
* jpeg_v5_0_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v5_0_0_suspend(void *handle)
+static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v5_0_0_hw_fini(adev);
+ r = jpeg_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -194,20 +193,19 @@ static int jpeg_v5_0_0_suspend(void *handle)
/**
* jpeg_v5_0_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v5_0_0_resume(void *handle)
+static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v5_0_0_hw_init(adev);
+ r = jpeg_v5_0_0_hw_init(ip_block);
return r;
}
@@ -546,9 +544,9 @@ static bool jpeg_v5_0_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v5_0_0_wait_for_idle(void *handle)
+static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
@@ -622,7 +620,6 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v5_0_0_sw_init,
.sw_fini = jpeg_v5_0_0_sw_fini,
.hw_init = jpeg_v5_0_0_hw_init,
@@ -631,14 +628,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.resume = jpeg_v5_0_0_resume,
.is_idle = jpeg_v5_0_0_is_idle,
.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 231a3d490ea8..0e758ebf2372 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -55,8 +55,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
-static int mes_v11_0_hw_init(void *handle);
-static int mes_v11_0_hw_fini(void *handle);
+static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
@@ -366,7 +366,7 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
uint32_t queue_id, uint32_t vmid)
{
struct amdgpu_device *adev = mes->adev;
- uint32_t value;
+ uint32_t value, reg;
int i, r = 0;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
@@ -424,6 +424,31 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
+ dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ switch (me_id) {
+ case 1:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
+ break;
+ case 0:
+ default:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
+ break;
+ }
+
+ value = 1 << queue_id;
+ WREG32(reg, value);
+ /* wait for queue reset done */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(reg) & value))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
+ r = -ETIMEDOUT;
+ }
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
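The new AMDGPU_RING_TYPE_SDMA branch above uses the driver's usual reset handshake: select the per-engine SDMAn_QUEUE_RESET_REQ register, write the bit for the target queue, then poll until the hardware clears it, giving up after adev->usec_timeout iterations of udelay(1). A self-contained sketch of the same write-then-poll-until-clear idiom against a faked register (names and timings here are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reset_req;	/* stands in for SDMAn_QUEUE_RESET_REQ */

/* fake hardware: acknowledges the reset after a few reads */
static uint32_t rreg(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reset_req = 0;
	return fake_reset_req;
}

static int reset_sdma_queue(uint32_t queue_id, int usec_timeout)
{
	uint32_t value = 1u << queue_id;
	int i;

	fake_reset_req = value;		/* WREG32(reg, value) upstream */
	for (i = 0; i < usec_timeout; i++) {
		if (!(rreg() & value))	/* hardware cleared the request bit */
			return 0;
		/* udelay(1) in the real driver */
	}
	return -1;			/* -ETIMEDOUT upstream */
}

int main(void)
{
	printf("reset %s\n", reset_sdma_queue(2, 100) ? "timed out" : "done");
	return 0;
}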
@@ -1336,9 +1361,9 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v11_0_sw_init(void *handle)
+static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
adev->mes.funcs = &mes_v11_0_funcs;
@@ -1377,9 +1402,9 @@ static int mes_v11_0_sw_init(void *handle)
return 0;
}
-static int mes_v11_0_sw_fini(void *handle)
+static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1473,6 +1498,7 @@ static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1496,6 +1522,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
@@ -1506,7 +1538,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
adev->mes.enable_legacy_queue_map = false;
if (adev->mes.enable_legacy_queue_map) {
- r = mes_v11_0_hw_init(adev);
+ r = mes_v11_0_hw_init(ip_block);
if (r)
goto failure;
}
@@ -1514,7 +1546,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
return r;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
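Because mes_v11_0_hw_init()/hw_fini() now take an ip_block rather than the device, kiq_hw_init(), which is handed only adev, must first look the MES block up via amdgpu_device_ip_get_ip_block() and fail cleanly if it is absent. A simplified standalone sketch of that lookup-by-type pattern (the structs and the linear scan are stand-ins for the real amdgpu ones):

#include <stddef.h>
#include <stdio.h>

enum ip_block_type { IP_BLOCK_TYPE_GFX, IP_BLOCK_TYPE_MES };

struct ip_block { enum ip_block_type type; };

struct device_ctx {
	struct ip_block blocks[2];
	int num_blocks;
};

/* simplified analogue of amdgpu_device_ip_get_ip_block(): linear scan
 * over the device's registered blocks, NULL when the type is absent */
static struct ip_block *get_ip_block(struct device_ctx *dev,
				     enum ip_block_type type)
{
	for (int i = 0; i < dev->num_blocks; i++)
		if (dev->blocks[i].type == type)
			return &dev->blocks[i];
	return NULL;
}

int main(void)
{
	struct device_ctx dev = {
		.blocks = { { IP_BLOCK_TYPE_GFX }, { IP_BLOCK_TYPE_MES } },
		.num_blocks = 2,
	};
	struct ip_block *mes = get_ip_block(&dev, IP_BLOCK_TYPE_MES);

	/* upstream: dev_err() plus -EINVAL when the lookup fails */
	printf("MES block %s\n", mes ? "found" : "missing");
	return 0;
}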
@@ -1535,10 +1567,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
return 0;
}
-static int mes_v11_0_hw_init(void *handle)
+static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->mes.ring[0].sched.ready)
goto out;
@@ -1590,13 +1622,13 @@ out:
return 0;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
-static int mes_v11_0_hw_fini(void *handle)
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_is_mes_info_enable(adev)) {
amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
&adev->mes.resource_1_addr);
@@ -1604,33 +1636,31 @@ static int mes_v11_0_hw_fini(void *handle)
return 0;
}
-static int mes_v11_0_suspend(void *handle)
+static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v11_0_hw_fini(adev);
+ return mes_v11_0_hw_fini(ip_block);
}
-static int mes_v11_0_resume(void *handle)
+static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mes_v11_0_hw_init(adev);
+ r = mes_v11_0_hw_init(ip_block);
if (r)
return r;
- return amdgpu_mes_resume(adev);
+ return amdgpu_mes_resume(ip_block->adev);
}
-static int mes_v11_0_early_init(void *handle)
+static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1644,9 +1674,9 @@ static int mes_v11_0_early_init(void *handle)
return 0;
}
-static int mes_v11_0_late_init(void *handle)
+static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
@@ -1666,8 +1696,6 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.hw_fini = mes_v11_0_hw_fini,
.suspend = mes_v11_0_suspend,
.resume = mes_v11_0_resume,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 8d27421689c9..3daa8862e622 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -39,8 +39,8 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");
-static int mes_v12_0_hw_init(void *handle);
-static int mes_v12_0_hw_fini(void *handle);
+static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
@@ -621,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
}
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -1326,9 +1326,9 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v12_0_sw_init(void *handle)
+static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
adev->mes.funcs = &mes_v12_0_funcs;
@@ -1336,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+ adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
@@ -1362,9 +1362,9 @@ static int mes_v12_0_sw_init(void *handle)
return 0;
}
-static int mes_v12_0_sw_fini(void *handle)
+static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1452,6 +1452,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (adev->enable_uni_mes)
mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
@@ -1479,6 +1480,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v12_0_enable(adev, true);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
@@ -1492,7 +1499,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
}
if (adev->mes.enable_legacy_queue_map) {
- r = mes_v12_0_hw_init(adev);
+ r = mes_v12_0_hw_init(ip_block);
if (r)
goto failure;
}
@@ -1500,7 +1507,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
return r;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
@@ -1522,10 +1529,10 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
return 0;
}
-static int mes_v12_0_hw_init(void *handle)
+static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->mes.ring[0].sched.ready)
goto out;
@@ -1584,42 +1591,40 @@ out:
return 0;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
-static int mes_v12_0_hw_fini(void *handle)
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int mes_v12_0_suspend(void *handle)
+static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v12_0_hw_fini(adev);
+ return mes_v12_0_hw_fini(ip_block);
}
-static int mes_v12_0_resume(void *handle)
+static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mes_v12_0_hw_init(adev);
+ r = mes_v12_0_hw_init(ip_block);
if (r)
return r;
- return amdgpu_mes_resume(adev);
+ return amdgpu_mes_resume(ip_block->adev);
}
-static int mes_v12_0_early_init(void *handle)
+static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1631,9 +1636,9 @@ static int mes_v12_0_early_init(void *handle)
return 0;
}
-static int mes_v12_0_late_init(void *handle)
+static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
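Two related hunks above change the MES event log for unified MES: the buffer is now allocated at AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE, and each pipe's interrupt-history pointer is offset into its own slice. A sketch of that layout, with an assumed buffer size and a hypothetical GPU address:

/* Sketch of the per-pipe MES event-log layout introduced above.
 * The size and base address are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_MAX_MES_PIPES       2
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000  /* assumed size for illustration */

/* one contiguous allocation, one slice per pipe */
static uint64_t event_log_slice(uint64_t log_gpu_addr, int pipe)
{
	return log_gpu_addr + (uint64_t)pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
}

int main(void)
{
	uint64_t base = 0x100000000ULL;  /* hypothetical GPU VA of the log BO */
	size_t total = AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE;

	printf("total log size: %zu bytes\n", total);
	for (int pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++)
		printf("pipe %d history ptr: 0x%llx\n", pipe,
		       (unsigned long long)event_log_slice(base, pipe));
	return 0;
}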
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e3ddd22aa172..e9a6f33ca710 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -229,6 +229,52 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
0);
}
+static void mmhub_v1_0_init_saw(struct amdgpu_device *adev)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ uint32_t tmp;
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ lower_32_bits(pt_base >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ upper_32_bits(pt_base >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ /* Program SAW CONTEXT0 CNTL */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL);
+ tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET;
+ tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp);
+
+ /* Disable all Contexts except Context0 */
+ tmp = 0xfffe;
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp);
+
+ /* Program SAW CNTL4 */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4);
+ tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET;
+ tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET;
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp);
+}
+
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
@@ -283,6 +329,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
+
+ if (amdgpu_ip_version(adev, ISP_HWIP, 0))
+ mmhub_v1_0_init_saw(adev);
}
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
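mmhub_v1_0_init_saw() splits each wide address across LO32/HI32 register pairs after dropping the 4 KiB page offset: the page-table base is shifted right by 12 and split with lower/upper_32_bits(), while the start/end addresses put bits 12..43 in LO32 and bits 44 and up in HI32. A standalone sketch of the arithmetic, with hypothetical addresses:

/* Sketch of how mmhub_v1_0_init_saw() splits addresses across two
 * 32-bit registers after dropping the 4 KiB page offset. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t pt_base    = 0x000000ABCD123000ULL; /* hypothetical PD address */
	uint64_t gart_start = 0x0000008000000000ULL; /* hypothetical GART window */

	/* BASE_ADDR_LO32 / HI32 take the base in units of 4 KiB pages */
	printf("BASE LO32: 0x%08x\n", lower_32_bits(pt_base >> 12));
	printf("BASE HI32: 0x%08x\n", upper_32_bits(pt_base >> 12));

	/* START/END split differently: bits 12..43 and 44..up */
	printf("START LO32: 0x%08x\n", (uint32_t)(gart_start >> 12));
	printf("START HI32: 0x%08x\n", (uint32_t)(gart_start >> 44));
	return 0;
}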
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index b281462093f1..0820ed62e2e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -542,19 +542,19 @@ static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}
-static int navi10_ih_early_init(void *handle)
+static int navi10_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
navi10_ih_set_interrupt_funcs(adev);
navi10_ih_set_self_irq_funcs(adev);
return 0;
}
-static int navi10_ih_sw_init(void *handle)
+static int navi10_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -593,43 +593,37 @@ static int navi10_ih_sw_init(void *handle)
return r;
}
-static int navi10_ih_sw_fini(void *handle)
+static int navi10_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int navi10_ih_hw_init(void *handle)
+static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return navi10_ih_irq_init(adev);
}
-static int navi10_ih_hw_fini(void *handle)
+static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- navi10_ih_irq_disable(adev);
+ navi10_ih_irq_disable(ip_block->adev);
return 0;
}
-static int navi10_ih_suspend(void *handle)
+static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return navi10_ih_hw_fini(adev);
+ return navi10_ih_hw_fini(ip_block);
}
-static int navi10_ih_resume(void *handle)
+static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return navi10_ih_hw_init(adev);
+ return navi10_ih_hw_init(ip_block);
}
static bool navi10_ih_is_idle(void *handle)
@@ -638,13 +632,13 @@ static bool navi10_ih_is_idle(void *handle)
return true;
}
-static int navi10_ih_wait_for_idle(void *handle)
+static int navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int navi10_ih_soft_reset(void *handle)
+static int navi10_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -700,7 +694,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.name = "navi10_ih",
.early_init = navi10_ih_early_init,
- .late_init = NULL,
.sw_init = navi10_ih_sw_init,
.sw_fini = navi10_ih_sw_fini,
.hw_init = navi10_ih_hw_init,
@@ -713,8 +706,6 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.set_clockgating_state = navi10_ih_set_clockgating_state,
.set_powergating_state = navi10_ih_set_powergating_state,
.get_clockgating_state = navi10_ih_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
index a5b60c9a2418..c88284ff92d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
@@ -68,6 +68,7 @@
#define SDMA_SUBOP_POLL_REG_WRITE_MEM 1
#define SDMA_SUBOP_POLL_DBIT_WRITE_MEM 2
#define SDMA_SUBOP_POLL_MEM_VERIFY 3
+#define SDMA_SUBOP_VM_INVALIDATION 4
#define HEADER_AGENT_DISPATCH 4
#define HEADER_BARRIER 5
#define SDMA_OP_AQL_COPY 0
@@ -4041,6 +4042,69 @@
/*
+** Definitions for SDMA_PKT_VM_INVALIDATION packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8
+#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift)
+
+/*define for gfx_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift 16
+#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift)
+
+/*define for mm_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift 24
+#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift)
+
+/*define for INVALIDATEREQ word*/
+/*define for invalidatereq field*/
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) (((x) & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift)
+
+/*define for ADDRESSRANGELO word*/
+/*define for addressrangelo field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift)
+
+/*define for ADDRESSRANGEHI word*/
+/*define for invalidateack field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift)
+
+/*define for addressrangehi field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift)
+
+/*define for reserved field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift)
+
+
+/*
** Definitions for SDMA_PKT_ATOMIC packet
*/
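The new SDMA_PKT_VM_INVALIDATION macros all follow the header's mask-then-shift convention, so a packet word is built by OR-ing the field helpers together. A sketch of composing the HEADER dword; the opcode value 8 is a placeholder, only SDMA_SUBOP_VM_INVALIDATION (4) comes from this header:

/* Sketch: composing the VM_INVALIDATION header dword from the new macros. */
#include <stdint.h>
#include <stdio.h>

#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x)         (((x) & 0x000000FF) << 0)
#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x)     (((x) & 0x000000FF) << 8)
#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & 0x0000001F) << 16)
#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x)  (((x) & 0x0000001F) << 24)
#define SDMA_SUBOP_VM_INVALIDATION 4

int main(void)
{
	uint32_t header =
		SDMA_PKT_VM_INVALIDATION_HEADER_OP(8) | /* hypothetical opcode */
		SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
		SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(0) |
		SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0);

	printf("header: 0x%08x\n", header); /* 0x00000408 */
	return 0;
}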
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 8d80df94bd8b..a26a9be58eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -414,8 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
- amdgpu_ras_set_fed(adev, true);
- amdgpu_ras_reset_gpu(adev);
+ amdgpu_ras_global_ras_isr(adev);
}
amdgpu_ras_error_data_fini(&err_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index d1bd79bbae53..8a0a63ac88d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -401,6 +401,17 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
return px;
}
+static bool nbio_v7_9_is_nps_switch_requested(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+ tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS,
+ CHANGE_STATUE);
+
+ /* 0x8 - NPS switch requested */
+ return (tmp == 0x8);
+}
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
u32 *supp_modes)
{
@@ -508,6 +519,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+ .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
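nbio_v7_9_is_nps_switch_requested() extracts the CHANGE_STATUE field from BIF_BX_PF0_PARTITION_MEM_STATUS and reports a pending NPS switch when it reads 0x8. A sketch of the field test; the mask and shift here are invented for illustration, the real values come from the register headers:

/* Sketch of the CHANGE_STATUE field check with assumed mask/shift. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHANGE_STATUE_SHIFT 0          /* assumed field position */
#define CHANGE_STATUE_MASK  0x0000000F /* assumed field width */

static bool is_nps_switch_requested(uint32_t partition_mem_status)
{
	uint32_t state = (partition_mem_status & CHANGE_STATUE_MASK) >> CHANGE_STATUE_SHIFT;

	return state == 0x8; /* 0x8 - NPS switch requested */
}

int main(void)
{
	printf("%d\n", is_nps_switch_requested(0x8)); /* 1 */
	printf("%d\n", is_nps_switch_requested(0x1)); /* 0 */
	return 0;
}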
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4938e6b340e9..6b72169be8f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -634,9 +634,9 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = {
.query_video_codecs = &nv_query_video_codecs,
};
-static int nv_common_early_init(void *handle)
+static int nv_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -944,9 +944,9 @@ static int nv_common_early_init(void *handle)
return 0;
}
-static int nv_common_late_init(void *handle)
+static int nv_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
@@ -973,9 +973,9 @@ static int nv_common_late_init(void *handle)
return 0;
}
-static int nv_common_sw_init(void *handle)
+static int nv_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -983,14 +983,9 @@ static int nv_common_sw_init(void *handle)
return 0;
}
-static int nv_common_sw_fini(void *handle)
+static int nv_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int nv_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->nbio.funcs->apply_lc_spc_mode_wa)
adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
@@ -1014,9 +1009,9 @@ static int nv_common_hw_init(void *handle)
return 0;
}
-static int nv_common_hw_fini(void *handle)
+static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because nv_enable_doorbell_aperture
@@ -1029,18 +1024,14 @@ static int nv_common_hw_fini(void *handle)
return 0;
}
-static int nv_common_suspend(void *handle)
+static int nv_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return nv_common_hw_fini(adev);
+ return nv_common_hw_fini(ip_block);
}
-static int nv_common_resume(void *handle)
+static int nv_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return nv_common_hw_init(adev);
+ return nv_common_hw_init(ip_block);
}
static bool nv_common_is_idle(void *handle)
@@ -1048,16 +1039,6 @@ static bool nv_common_is_idle(void *handle)
return true;
}
-static int nv_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int nv_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static int nv_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1115,17 +1096,12 @@ static const struct amd_ip_funcs nv_common_ip_funcs = {
.early_init = nv_common_early_init,
.late_init = nv_common_late_init,
.sw_init = nv_common_sw_init,
- .sw_fini = nv_common_sw_fini,
.hw_init = nv_common_hw_init,
.hw_fini = nv_common_hw_fini,
.suspend = nv_common_suspend,
.resume = nv_common_resume,
.is_idle = nv_common_is_idle,
- .wait_for_idle = nv_common_wait_for_idle,
- .soft_reset = nv_common_soft_reset,
.set_clockgating_state = nv_common_set_clockgating_state,
.set_powergating_state = nv_common_set_powergating_state,
.get_clockgating_state = nv_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
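The deletions above (nv_common_sw_fini, nv_common_wait_for_idle, nv_common_soft_reset, and the NULL table entries) rely on the IP dispatch treating these callbacks as optional: a missing pointer is skipped rather than called. A small sketch of that pattern, with stand-in types:

/* Sketch: optional callbacks are checked before the call, so stubs that
 * only return 0 can simply be dropped from the funcs table. */
#include <stdio.h>

struct ip_funcs {
	int (*hw_init)(void);
	int (*soft_reset)(void); /* optional: may be NULL */
};

static int my_hw_init(void) { puts("hw_init"); return 0; }

static int run_soft_reset(const struct ip_funcs *f)
{
	if (!f->soft_reset)
		return 0;          /* nothing to do for this IP */
	return f->soft_reset();
}

int main(void)
{
	struct ip_funcs funcs = { .hw_init = my_hw_init }; /* soft_reset left NULL */

	funcs.hw_init();
	return run_soft_reset(&funcs);
}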
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 37b5ddd6f13b..f4a91b126c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -103,6 +103,10 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */
+ /* IDs for performance monitoring/profiling */
+ GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
+ /* Dynamic memory partitioning (NPS mode change) */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
};
/* PSP boot config sub-commands */
@@ -351,6 +355,20 @@ struct psp_gfx_cmd_sriov_spatial_part {
uint32_t override_this_aid;
};
+/* Structure for SQ performance monitoring/profiling enable/disable */
+struct psp_gfx_cmd_config_sq_perfmon {
+ uint32_t gfx_xcp_mask;
+ uint8_t core_override;
+ uint8_t reg_override;
+ uint8_t perfmon_override;
+ uint8_t reserved[5];
+};
+
+struct psp_gfx_cmd_fb_memory_part {
+ uint32_t mode; /* requested NPS mode */
+ uint32_t resvd;
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -365,6 +383,8 @@ union psp_gfx_commands
struct psp_gfx_cmd_load_toc cmd_load_toc;
struct psp_gfx_cmd_boot_cfg boot_cfg;
struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part;
+ struct psp_gfx_cmd_config_sq_perfmon config_sq_perfmon;
+ struct psp_gfx_cmd_fb_memory_part cmd_memory_part;
};
struct psp_gfx_uresp_reserved
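Issuing an NPS mode change through the new interface means filling struct psp_gfx_cmd_fb_memory_part and submitting it with GFX_CMD_ID_FB_NPS_MODE. A sketch with a stubbed submit path; in the driver the command goes through the PSP GFX ring, and the mode value here is assumed for illustration:

/* Sketch of issuing GFX_CMD_ID_FB_NPS_MODE with the new command struct. */
#include <stdint.h>
#include <stdio.h>

#define GFX_CMD_ID_FB_NPS_MODE 0x00000048

struct psp_gfx_cmd_fb_memory_part {
	uint32_t mode;  /* requested NPS mode */
	uint32_t resvd;
};

static int submit_psp_cmd(uint32_t cmd_id, const void *buf) /* stub */
{
	printf("cmd 0x%x, mode %u\n", cmd_id,
	       ((const struct psp_gfx_cmd_fb_memory_part *)buf)->mode);
	return 0;
}

int main(void)
{
	struct psp_gfx_cmd_fb_memory_part cmd = {
		.mode = 4,  /* e.g. NPS4; value assumed for illustration */
	};

	return submit_psp_cmd(GFX_CMD_ID_FB_NPS_MODE, &cmd);
}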
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 51e470e8d67d..c4b775aaee9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -823,6 +823,30 @@ static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp)
return (pmfw_ver < 0x557300);
}
+static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
+{
+ uint32_t ucode_ver;
+
+ if (!psp_v13_0_is_sos_alive(psp))
+ return false;
+
+ /* Restrict reload support only to specific IP versions */
+ switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ /* TOS version read from microcode header */
+ ucode_ver = psp->sos.fw_version;
+ /* Read TOS version from hardware */
+ psp_v13_0_init_sos_version(psp);
+ return (ucode_ver != psp->sos.fw_version);
+ default:
+ return false;
+ }
+
+ return false;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -847,6 +871,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
+ .is_reload_needed = psp_v13_0_is_reload_needed,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
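The reload check above boils down to a version comparison: read the TOS version from the microcode header, re-read the one reported by hardware, and reload only when they differ. A standalone sketch with the hardware read stubbed out and made-up version numbers:

/* Sketch of the TOS reload decision: reload only when the version baked
 * into the firmware image differs from the version currently running. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_hw_sos_version(void) /* stub for psp_v13_0_init_sos_version() */
{
	return 0x557100; /* pretend hardware reports an older TOS */
}

static bool is_reload_needed(uint32_t ucode_fw_version)
{
	return ucode_fw_version != read_hw_sos_version();
}

int main(void)
{
	printf("reload: %d\n", is_reload_needed(0x557300)); /* 1: versions differ */
	printf("reload: %d\n", is_reload_needed(0x557100)); /* 0: already current */
	return 0;
}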
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 725392522267..7948d74f8722 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -807,9 +807,9 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int sdma_v2_4_early_init(void *handle)
+static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
@@ -826,11 +826,11 @@ static int sdma_v2_4_early_init(void *handle)
return 0;
}
-static int sdma_v2_4_sw_init(void *handle)
+static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
@@ -866,9 +866,9 @@ static int sdma_v2_4_sw_init(void *handle)
return r;
}
-static int sdma_v2_4_sw_fini(void *handle)
+static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -878,10 +878,10 @@ static int sdma_v2_4_sw_fini(void *handle)
return 0;
}
-static int sdma_v2_4_hw_init(void *handle)
+static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v2_4_init_golden_registers(adev);
@@ -892,27 +892,21 @@ static int sdma_v2_4_hw_init(void *handle)
return r;
}
-static int sdma_v2_4_hw_fini(void *handle)
+static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- sdma_v2_4_enable(adev, false);
+ sdma_v2_4_enable(ip_block->adev, false);
return 0;
}
-static int sdma_v2_4_suspend(void *handle)
+static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v2_4_hw_fini(adev);
+ return sdma_v2_4_hw_fini(ip_block);
}
-static int sdma_v2_4_resume(void *handle)
+static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v2_4_hw_init(adev);
+ return sdma_v2_4_hw_init(ip_block);
}
static bool sdma_v2_4_is_idle(void *handle)
@@ -927,11 +921,11 @@ static bool sdma_v2_4_is_idle(void *handle)
return true;
}
-static int sdma_v2_4_wait_for_idle(void *handle)
+static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -944,10 +938,10 @@ static int sdma_v2_4_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v2_4_soft_reset(void *handle)
+static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
@@ -1102,7 +1096,6 @@ static int sdma_v2_4_set_powergating_state(void *handle,
static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
- .late_init = NULL,
.sw_init = sdma_v2_4_sw_init,
.sw_fini = sdma_v2_4_sw_fini,
.hw_init = sdma_v2_4_hw_init,
@@ -1114,8 +1107,6 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
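The wait_for_idle callbacks converted in this and the following SDMA files all share one shape: poll a busy mask for up to adev->usec_timeout iterations, returning 0 on idle and -ETIMEDOUT otherwise. A sketch of the loop with register access and the microsecond delay stubbed:

/* Sketch of the wait_for_idle polling loop used by the SDMA IP callbacks. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_BUSY_MASK 0x3

static uint32_t read_status(void) { static int n; return n++ < 5 ? 1 : 0; } /* stub */
static void udelay_stub(unsigned us) { (void)us; }

static int wait_for_idle(unsigned usec_timeout)
{
	for (unsigned i = 0; i < usec_timeout; i++) {
		if (!(read_status() & SDMA_BUSY_MASK))
			return 0;      /* engine went idle */
		udelay_stub(1);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(100000));
	return 0;
}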
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e65194fe94af..9a3d729545a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1080,9 +1080,9 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int sdma_v3_0_early_init(void *handle)
+static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
switch (adev->asic_type) {
@@ -1106,11 +1106,11 @@ static int sdma_v3_0_early_init(void *handle)
return 0;
}
-static int sdma_v3_0_sw_init(void *handle)
+static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
@@ -1152,9 +1152,9 @@ static int sdma_v3_0_sw_init(void *handle)
return r;
}
-static int sdma_v3_0_sw_fini(void *handle)
+static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1164,10 +1164,10 @@ static int sdma_v3_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v3_0_hw_init(void *handle)
+static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v3_0_init_golden_registers(adev);
@@ -1178,9 +1178,9 @@ static int sdma_v3_0_hw_init(void *handle)
return r;
}
-static int sdma_v3_0_hw_fini(void *handle)
+static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
@@ -1188,18 +1188,14 @@ static int sdma_v3_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v3_0_suspend(void *handle)
+static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v3_0_hw_fini(adev);
+ return sdma_v3_0_hw_fini(ip_block);
}
-static int sdma_v3_0_resume(void *handle)
+static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v3_0_hw_init(adev);
+ return sdma_v3_0_hw_init(ip_block);
}
static bool sdma_v3_0_is_idle(void *handle)
@@ -1214,11 +1210,11 @@ static bool sdma_v3_0_is_idle(void *handle)
return true;
}
-static int sdma_v3_0_wait_for_idle(void *handle)
+static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1231,9 +1227,9 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static bool sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
@@ -1252,9 +1248,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle)
}
}
-static int sdma_v3_0_pre_soft_reset(void *handle)
+static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
@@ -1271,9 +1267,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
return 0;
}
-static int sdma_v3_0_post_soft_reset(void *handle)
+static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
@@ -1290,9 +1286,9 @@ static int sdma_v3_0_post_soft_reset(void *handle)
return 0;
}
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp;
@@ -1538,7 +1534,6 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v3_0_sw_init,
.sw_fini = sdma_v3_0_sw_fini,
.hw_init = sdma_v3_0_hw_init,
@@ -1554,8 +1549,6 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
.get_clockgating_state = sdma_v3_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 23ef4eb36b40..c1f98f6cf20d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1751,9 +1751,9 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_early_init(void *handle)
+static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v4_0_init_microcode(adev);
@@ -1780,9 +1780,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
-static int sdma_v4_0_late_init(void *handle)
+static int sdma_v4_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v4_0_setup_ulv(adev);
@@ -1792,11 +1792,11 @@ static int sdma_v4_0_late_init(void *handle)
return 0;
}
-static int sdma_v4_0_sw_init(void *handle)
+static int sdma_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
uint32_t *ptr;
@@ -1929,9 +1929,9 @@ static int sdma_v4_0_sw_init(void *handle)
return r;
}
-static int sdma_v4_0_sw_fini(void *handle)
+static int sdma_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1951,9 +1951,9 @@ static int sdma_v4_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v4_0_hw_init(void *handle)
+static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
@@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(void *handle)
return sdma_v4_0_start(adev);
}
-static int sdma_v4_0_hw_fini(void *handle)
+static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
if (amdgpu_sriov_vf(adev))
@@ -1988,9 +1988,9 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v4_0_suspend(void *handle)
+static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SMU saves SDMA state for us */
if (adev->in_s0ix) {
@@ -1998,12 +1998,12 @@ static int sdma_v4_0_suspend(void *handle)
return 0;
}
- return sdma_v4_0_hw_fini(adev);
+ return sdma_v4_0_hw_fini(ip_block);
}
-static int sdma_v4_0_resume(void *handle)
+static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SMU restores SDMA state for us */
if (adev->in_s0ix) {
@@ -2012,7 +2012,7 @@ static int sdma_v4_0_resume(void *handle)
return 0;
}
- return sdma_v4_0_hw_init(adev);
+ return sdma_v4_0_hw_init(ip_block);
}
static bool sdma_v4_0_is_idle(void *handle)
@@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle)
return true;
}
-static int sdma_v4_0_wait_for_idle(void *handle)
+static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i, j;
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
for (j = 0; j < adev->sdma.num_instances; j++) {
@@ -2049,7 +2049,7 @@ static int sdma_v4_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v4_0_soft_reset(void *handle)
+static int sdma_v4_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -2350,9 +2350,9 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
uint32_t instance_offset;
@@ -2371,9 +2371,9 @@ static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v4_0_dump_ip_state(void *handle)
+static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index c77889040760..9c7cea0890c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -1290,9 +1290,9 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
-static int sdma_v4_4_2_early_init(void *handle)
+static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v4_4_2_init_microcode(adev);
@@ -1318,9 +1318,9 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
#endif
-static int sdma_v4_4_2_late_init(void *handle)
+static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
#if 0
struct ras_ih_if ih_info = {
.cb = sdma_v4_4_2_process_ras_data_cb,
@@ -1332,11 +1332,11 @@ static int sdma_v4_4_2_late_init(void *handle)
return 0;
}
-static int sdma_v4_4_2_sw_init(void *handle)
+static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 aid_id;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
uint32_t *ptr;
@@ -1445,9 +1445,9 @@ static int sdma_v4_4_2_sw_init(void *handle)
return r;
}
-static int sdma_v4_4_2_sw_fini(void *handle)
+static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1467,10 +1467,10 @@ static int sdma_v4_4_2_sw_fini(void *handle)
return 0;
}
-static int sdma_v4_4_2_hw_init(void *handle)
+static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
@@ -1482,9 +1482,9 @@ static int sdma_v4_4_2_hw_init(void *handle)
return r;
}
-static int sdma_v4_4_2_hw_fini(void *handle)
+static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
int i;
@@ -1508,21 +1508,19 @@ static int sdma_v4_4_2_hw_fini(void *handle)
static int sdma_v4_4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
-static int sdma_v4_4_2_suspend(void *handle)
+static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev))
sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
- return sdma_v4_4_2_hw_fini(adev);
+ return sdma_v4_4_2_hw_fini(ip_block);
}
-static int sdma_v4_4_2_resume(void *handle)
+static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v4_4_2_hw_init(adev);
+ return sdma_v4_4_2_hw_init(ip_block);
}
static bool sdma_v4_4_2_is_idle(void *handle)
@@ -1540,11 +1538,11 @@ static bool sdma_v4_4_2_is_idle(void *handle)
return true;
}
-static int sdma_v4_4_2_wait_for_idle(void *handle)
+static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i, j;
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
for (j = 0; j < adev->sdma.num_instances; j++) {
@@ -1559,7 +1557,7 @@ static int sdma_v4_4_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v4_4_2_soft_reset(void *handle)
+static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -1857,9 +1855,9 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
uint32_t instance_offset;
@@ -1878,9 +1876,9 @@ static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v4_4_2_dump_ip_state(void *handle)
+static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 3e48ea38385d..d31c4860933f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -705,14 +705,16 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v5_0_gfx_resume - setup and start the async dma engines
+ * sdma_v5_0_gfx_resume_instance - start/restart a given SDMA engine instance
*
* @adev: amdgpu_device pointer
+ * @i: instance to resume
+ * @restore: whether to restore the saved wptr on restart
*
- * Set up the gfx DMA ring buffers and enable them (NAVI10).
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -722,142 +724,163 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
u32 temp;
u32 wptr_poll_cntl;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ ring = &adev->sdma.instance[i].ring;
- if (!amdgpu_sriov_vf(adev))
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
-
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
- mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
- SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
- wptr_poll_cntl);
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
- ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
- ring->gpu_addr >> 40);
-
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+ mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+ wptr_poll_cntl);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
+ ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
+ ring->gpu_addr >> 40);
+
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a lesser value, minor_ptr_update must be set first */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
- mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+ mmSDMA0_GFX_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
- doorbell_offset);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
+ doorbell_offset);
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index, 20);
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index, 20);
- if (amdgpu_sriov_vf(adev))
- sdma_v5_0_ring_set_wptr(ring);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v5_0_ring_set_wptr(ring);
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- if (!amdgpu_sriov_vf(adev)) {
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ }
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v5_0_ctx_switch_enable(adev, true);
- sdma_v5_0_enable(adev, true);
- }
+ if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v5_0_ctx_switch_enable(adev, true);
+ sdma_v5_0_enable(adev, true);
+ }
- r = amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_test_helper(ring);
+}
+
+/**
+ * sdma_v5_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (NAVI10).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v5_0_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
@@ -1366,9 +1389,9 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v5_0_early_init(void *handle)
+static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v5_0_init_microcode(adev);
@@ -1385,11 +1408,11 @@ static int sdma_v5_0_early_init(void *handle)
}
-static int sdma_v5_0_sw_init(void *handle)
+static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
uint32_t *ptr;
@@ -1439,9 +1462,9 @@ static int sdma_v5_0_sw_init(void *handle)
return r;
}
-static int sdma_v5_0_sw_fini(void *handle)
+static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1454,10 +1477,10 @@ static int sdma_v5_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v5_0_hw_init(void *handle)
+static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v5_0_init_golden_registers(adev);
@@ -1466,9 +1489,9 @@ static int sdma_v5_0_hw_init(void *handle)
return r;
}
-static int sdma_v5_0_hw_fini(void *handle)
+static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1479,18 +1502,14 @@ static int sdma_v5_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v5_0_suspend(void *handle)
+static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_0_hw_fini(adev);
+ return sdma_v5_0_hw_fini(ip_block);
}
-static int sdma_v5_0_resume(void *handle)
+static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_0_hw_init(adev);
+ return sdma_v5_0_hw_init(ip_block);
}
static bool sdma_v5_0_is_idle(void *handle)
@@ -1508,11 +1527,11 @@ static bool sdma_v5_0_is_idle(void *handle)
return true;
}
-static int sdma_v5_0_wait_for_idle(void *handle)
+static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
@@ -1525,13 +1544,100 @@ static int sdma_v5_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v5_0_soft_reset(void *handle)
+static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
}
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r;
+ u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ /* stop queue */
+ ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+
+ rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* stop the engine: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
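+ /* poll until the engine reports the frozen status, up to the usec timeout */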
+ for (j = 0; j < adev->usec_timeout; j++) {
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
+ break;
+ udelay(1);
+ }
+
+ /* if the frozen status was never reported, check that the sdma copy engine is fully idle */
+ if (j == adev->usec_timeout) {
+ stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
+ if ((stat1_reg & 0x3FF) != 0x3FF) {
+ DRM_ERROR("cannot soft reset as sdma not idle\n");
+ r = -ETIMEDOUT;
+ goto err0;
+ }
+ }
+
+ f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+
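+ /* disable utc l1 while the engine is halted; resume re-enables it */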
+ cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+
+ /* soft reset: clear SDMA_GFX_PREEMPT.IB_PREEMPT, then toggle mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 */
+ preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
+ preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
+
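+ /* each sdma instance has its own grbm soft-reset bit, offset by the instance index */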
+ soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ udelay(50);
+
+ soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ /* unfreeze */
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
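+ /* restart the queue, restoring the saved rptr/wptr */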
+ r = sdma_v5_0_gfx_resume_instance(adev, i, true);
+
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
@@ -1778,9 +1884,9 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
uint32_t instance_offset;
@@ -1799,9 +1905,9 @@ static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v5_0_dump_ip_state(void *handle)
+static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
@@ -1823,7 +1929,6 @@ static void sdma_v5_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v5_0_sw_init,
.sw_fini = sdma_v5_0_sw_fini,
.hw_init = sdma_v5_0_hw_init,
@@ -1874,6 +1979,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
+ .reset = sdma_v5_0_reset_queue,
};
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index bc9b240a3488..ffa8c62ac101 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -522,14 +522,17 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v5_2_gfx_resume - setup and start the async dma engines
+ * sdma_v5_2_gfx_resume_instance - start/restart a specific sdma instance
*
* @adev: amdgpu_device pointer
+ * @i: instance index
+ * @restore: whether to restore the cached wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -539,139 +542,161 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
u32 temp;
u32 wptr_poll_cntl;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ ring = &adev->sdma.instance[i].ring;
- if (!amdgpu_sriov_vf(adev))
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ }
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
- mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
- SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
- wptr_poll_cntl);
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
-
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+ wptr_poll_cntl);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
- if (amdgpu_sriov_vf(adev))
- sdma_v5_2_ring_set_wptr(ring);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v5_2_ring_set_wptr(ring);
- /* set minor_ptr_update to 0 after wptr programed */
+ /* set minor_ptr_update to 0 after wptr is programmed */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* SRIOV VF has no control of any of registers below */
- if (!amdgpu_sriov_vf(adev)) {
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
-
- /* unhalt engine */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ /* SRIOV VF has no control of any of registers below */
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+
+ /* unhalt engine */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v5_2_ctx_switch_enable(adev, true);
- sdma_v5_2_enable(adev, true);
- }
+ if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v5_2_ctx_switch_enable(adev, true);
+ sdma_v5_2_enable(adev, true);
+ }
- r = amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_test_helper(ring);
+}
+
+/**
+ * sdma_v5_2_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v5_2_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
@@ -736,9 +761,9 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(void *handle)
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
int i;
@@ -778,6 +803,7 @@ static int sdma_v5_2_soft_reset(void *handle)
static int sdma_v5_2_start(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (amdgpu_sriov_vf(adev)) {
sdma_v5_2_ctx_switch_enable(adev, false);
@@ -798,7 +824,11 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
- sdma_v5_2_soft_reset(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA);
+ if (!ip_block)
+ return -EINVAL;
+
+ sdma_v5_2_soft_reset(ip_block);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
/* enable sdma ring preemption */
@@ -1180,7 +1210,28 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+ uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
+
+ /* Update the PD address for this VMID. */
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+ (hub->ctx_addr_distance * vmid),
+ lower_32_bits(pd_addr));
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+ (hub->ctx_addr_distance * vmid),
+ upper_32_bits(pd_addr));
+
+ /* Trigger invalidation. */
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
+ amdgpu_ring_write(ring, req);
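+ /* Invalidate the full address range; request an ack on this VMID's bit. */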
+ amdgpu_ring_write(ring, 0xFFFFFFFF);
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
}
static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
@@ -1216,9 +1267,9 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v5_2_early_init(void *handle)
+static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1268,11 +1319,11 @@ static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
return -EINVAL;
}
-static int sdma_v5_2_sw_init(void *handle)
+static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
uint32_t *ptr;
@@ -1316,9 +1367,9 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
-static int sdma_v5_2_sw_fini(void *handle)
+static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1331,16 +1382,16 @@ static int sdma_v5_2_sw_fini(void *handle)
return 0;
}
-static int sdma_v5_2_hw_init(void *handle)
+static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v5_2_start(adev);
}
-static int sdma_v5_2_hw_fini(void *handle)
+static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1351,18 +1402,14 @@ static int sdma_v5_2_hw_fini(void *handle)
return 0;
}
-static int sdma_v5_2_suspend(void *handle)
+static int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_2_hw_fini(adev);
+ return sdma_v5_2_hw_fini(ip_block);
}
-static int sdma_v5_2_resume(void *handle)
+static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_2_hw_init(adev);
+ return sdma_v5_2_hw_init(ip_block);
}
static bool sdma_v5_2_is_idle(void *handle)
@@ -1380,11 +1427,11 @@ static bool sdma_v5_2_is_idle(void *handle)
return true;
}
-static int sdma_v5_2_wait_for_idle(void *handle)
+static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1, sdma2, sdma3;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
@@ -1399,6 +1446,96 @@ static int sdma_v5_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r;
+ u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ /* stop queue */
+ ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+
+ rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* stop the engine: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
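+ /* poll until the engine reports the frozen status, up to the usec timeout */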
+ for (j = 0; j < adev->usec_timeout; j++) {
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+
+ if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
+ break;
+ udelay(1);
+ }
+
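+ /* if the frozen status was never reported, check that the sdma copy engine is fully idle */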
+ if (j == adev->usec_timeout) {
+ stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
+ if ((stat1_reg & 0x3FF) != 0x3FF) {
+ DRM_ERROR("cannot soft reset as sdma not idle\n");
+ r = -ETIMEDOUT;
+ goto err0;
+ }
+ }
+
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+
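+ /* disable utc l1 while the engine is halted; resume re-enables it */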
+ cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+
+ /* soft reset: clear SDMA_GFX_PREEMPT.IB_PREEMPT, then toggle mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 */
+ preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
+ preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
+
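+ /* each sdma instance has its own grbm soft-reset bit, offset by the instance index */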
+ soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ udelay(50);
+
+ soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ /* unfreeze and unhalt */
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
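+ /* restart the queue, restoring the saved rptr/wptr */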
+ r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
@@ -1736,9 +1873,9 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_off_ctrl(adev, true);
}
-static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
uint32_t instance_offset;
@@ -1757,9 +1894,9 @@ static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v5_2_dump_ip_state(void *handle)
+static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
@@ -1781,7 +1918,6 @@ static void sdma_v5_2_dump_ip_state(void *handle)
static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
- .late_init = NULL,
.sw_init = sdma_v5_2_sw_init,
.sw_fini = sdma_v5_2_sw_fini,
.hw_init = sdma_v5_2_hw_init,
@@ -1834,6 +1970,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
.preempt_ib = sdma_v5_2_ring_preempt_ib,
+ .reset = sdma_v5_2_reset_queue,
};
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 208a1fa9d4e7..5635f2d84090 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v6_0_gfx_resume - setup and start the async dma engines
+ * sdma_v6_0_gfx_resume_instance - start/restart a specific sdma instance
*
* @adev: amdgpu_device pointer
+ * @i: instance index
+ * @restore: whether to restore the cached wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell_offset;
u32 temp;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
-
- if (!amdgpu_sriov_vf(adev))
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ ring = &adev->sdma.instance[i].ring;
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+
+ if (!restore)
+ ring->wptr = 0;
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
+ /* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ }
- ring->wptr = 0;
+ doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
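+ /* the doorbell range covers all sdma instances, so it is programmed once from instance 0 */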
+ if (i == 0)
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
- doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
+ if (amdgpu_sriov_vf(adev))
+ sdma_v6_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- if (i == 0)
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
-
- if (amdgpu_sriov_vf(adev))
- sdma_v6_0_ring_set_wptr(ring);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- /* Set up sdma hang watchdog */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
- /* 100ms per unit */
- temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
- max(adev->usec_timeout/100000, 1));
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
- if (amdgpu_sriov_vf(adev))
- sdma_v6_0_enable(adev, true);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v6_0_enable(adev, true);
+
+ return amdgpu_ring_test_helper(ring);
+}
- r = amdgpu_ring_test_helper(ring);
+/**
+ * sdma_v6_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v6_0_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
@@ -733,9 +755,9 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v6_0_soft_reset(void *handle)
+static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
int i;
@@ -769,9 +791,9 @@ static int sdma_v6_0_soft_reset(void *handle)
return sdma_v6_0_start(adev);
}
-static bool sdma_v6_0_check_soft_reset(void *handle)
+static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
long tmo = msecs_to_jiffies(1000);
@@ -1272,9 +1294,9 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
}
}
-static int sdma_v6_0_early_init(void *handle)
+static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1291,11 +1313,11 @@ static int sdma_v6_0_early_init(void *handle)
return 0;
}
-static int sdma_v6_0_sw_init(void *handle)
+static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
uint32_t *ptr;
@@ -1343,9 +1365,9 @@ static int sdma_v6_0_sw_init(void *handle)
return r;
}
-static int sdma_v6_0_sw_fini(void *handle)
+static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1358,16 +1380,16 @@ static int sdma_v6_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v6_0_hw_init(void *handle)
+static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v6_0_start(adev);
}
-static int sdma_v6_0_hw_fini(void *handle)
+static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1378,18 +1400,14 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v6_0_suspend(void *handle)
+static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v6_0_hw_fini(adev);
+ return sdma_v6_0_hw_fini(ip_block);
}
-static int sdma_v6_0_resume(void *handle)
+static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v6_0_hw_init(adev);
+ return sdma_v6_0_hw_init(ip_block);
}
static bool sdma_v6_0_is_idle(void *handle)
@@ -1407,11 +1425,11 @@ static bool sdma_v6_0_is_idle(void *handle)
return true;
}
-static int sdma_v6_0_wait_for_idle(void *handle)
+static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
@@ -1469,6 +1487,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
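+ /* have the MES firmware reset the queue, then restore the ring state */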
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ if (r)
+ return r;
+
+ return sdma_v6_0_gfx_resume_instance(adev, i, true);
+}
+
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1556,9 +1599,9 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
-static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
uint32_t instance_offset;
@@ -1577,9 +1620,9 @@ static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v6_0_dump_ip_state(void *handle)
+static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
@@ -1601,7 +1644,6 @@ static void sdma_v6_0_dump_ip_state(void *handle)
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v6_0_sw_init,
.sw_fini = sdma_v6_0_sw_fini,
.hw_init = sdma_v6_0_hw_init,
@@ -1652,6 +1694,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
.preempt_ib = sdma_v6_0_ring_preempt_ib,
+ .reset = sdma_v6_0_reset_queue,
};
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1726,7 +1769,7 @@ static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index a8763496aed3..d2ce6b6a7ff6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+/* define for the compression field for sdma7 */
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
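+/* COMPRESS(1) places the compress flag at bit 16 of the constant fill header */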
+
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
@@ -747,9 +753,9 @@ static int sdma_v7_0_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v7_0_soft_reset(void *handle)
+static int sdma_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
int i;
@@ -783,9 +789,9 @@ static int sdma_v7_0_soft_reset(void *handle)
return sdma_v7_0_start(adev);
}
-static bool sdma_v7_0_check_soft_reset(void *handle)
+static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
long tmo = msecs_to_jiffies(1000);
@@ -1253,9 +1259,9 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v7_0_early_init(void *handle)
+static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1273,11 +1279,11 @@ static int sdma_v7_0_early_init(void *handle)
return 0;
}
-static int sdma_v7_0_sw_init(void *handle)
+static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
uint32_t *ptr;
@@ -1320,9 +1326,9 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
-static int sdma_v7_0_sw_fini(void *handle)
+static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1338,16 +1344,16 @@ static int sdma_v7_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v7_0_hw_init(void *handle)
+static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v7_0_start(adev);
}
-static int sdma_v7_0_hw_fini(void *handle)
+static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1358,18 +1364,14 @@ static int sdma_v7_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v7_0_suspend(void *handle)
+static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v7_0_hw_fini(adev);
+ return sdma_v7_0_hw_fini(ip_block);
}
-static int sdma_v7_0_resume(void *handle)
+static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v7_0_hw_init(adev);
+ return sdma_v7_0_hw_init(ip_block);
}
static bool sdma_v7_0_is_idle(void *handle)
@@ -1387,11 +1389,11 @@ static bool sdma_v7_0_is_idle(void *handle)
return true;
}
-static int sdma_v7_0_wait_for_idle(void *handle)
+static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
@@ -1538,9 +1540,9 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
}
-static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v7_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
uint32_t instance_offset;
@@ -1559,9 +1561,9 @@ static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v7_0_dump_ip_state(void *handle)
+static void sdma_v7_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
@@ -1724,7 +1726,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
+ SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
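Note on the hunk above: the constant-fill emitter was building its packet header through the copy-linear macro; the replacement uses the constant-fill header macro and newly sets the compress bit. As a rough illustration of what such a header amounts to, here is a minimal, self-contained C sketch. The opcode value, field offsets, and the byte-count encoding in the last dword are assumptions for illustration only; the real definitions are generated SDMA 7.0 packet macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout only; the real macros come from the
     * SDMA 7.0 packet headers and may place these fields differently. */
    #define SDMA_OP_CONST_FILL      11
    #define FILL_HEADER_OP(op)      ((uint32_t)(op) & 0xff)
    #define FILL_HEADER_COMPRESS(x) (((uint32_t)(x) & 0x1) << 16)

    int main(void)
    {
        uint32_t pkt[5];
        uint64_t dst = 0x100000;

        /* header: opcode in the low byte, compress enable higher up */
        pkt[0] = FILL_HEADER_OP(SDMA_OP_CONST_FILL) | FILL_HEADER_COMPRESS(1);
        pkt[1] = (uint32_t)(dst & 0xffffffffu);   /* dst address, low half  */
        pkt[2] = (uint32_t)(dst >> 32);           /* dst address, high half */
        pkt[3] = 0xdeadbeef;                      /* 32-bit fill pattern    */
        pkt[4] = 4096;                            /* byte count (encoding assumed) */

        for (int i = 0; i < 5; i++)
            printf("dw%d: 0x%08x\n", i, pkt[i]);
        return 0;
    }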
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 85235470e872..00f63d3fbea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2022,9 +2022,9 @@ static uint32_t si_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static int si_common_early_init(void *handle)
+static int si_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->smc_rreg = &si_smc_rreg;
adev->smc_wreg = &si_smc_wreg;
@@ -2148,17 +2148,6 @@ static int si_common_early_init(void *handle)
return 0;
}
-static int si_common_sw_init(void *handle)
-{
- return 0;
-}
-
-static int si_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-
static void si_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -2633,9 +2622,9 @@ static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
pcie_set_readrq(adev->pdev, 512);
}
-static int si_common_hw_init(void *handle)
+static int si_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
si_fix_pci_max_read_req_size(adev);
si_init_golden_registers(adev);
@@ -2645,23 +2634,14 @@ static int si_common_hw_init(void *handle)
return 0;
}
-static int si_common_hw_fini(void *handle)
+static int si_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int si_common_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_common_hw_fini(adev);
-}
-
-static int si_common_resume(void *handle)
+static int si_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_common_hw_init(adev);
+ return si_common_hw_init(ip_block);
}
static bool si_common_is_idle(void *handle)
@@ -2669,16 +2649,6 @@ static bool si_common_is_idle(void *handle)
return true;
}
-static int si_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int si_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static int si_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -2694,20 +2664,12 @@ static int si_common_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
- .late_init = NULL,
- .sw_init = si_common_sw_init,
- .sw_fini = si_common_sw_fini,
.hw_init = si_common_hw_init,
.hw_fini = si_common_hw_fini,
- .suspend = si_common_suspend,
.resume = si_common_resume,
.is_idle = si_common_is_idle,
- .wait_for_idle = si_common_wait_for_idle,
- .soft_reset = si_common_soft_reset,
.set_clockgating_state = si_common_set_clockgating_state,
.set_powergating_state = si_common_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version si_common_ip_block =
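si.c can drop its do-nothing sw_init/sw_fini/wait_for_idle/soft_reset stubs and the explicit NULL initializers only because the surrounding IP dispatch code treats every amd_ip_funcs callback as optional. A minimal sketch of that convention, with all names invented for illustration:

    #include <stdio.h>

    struct ip_funcs {
        const char *name;
        int (*sw_init)(void *ctx);   /* optional */
        int (*sw_fini)(void *ctx);   /* optional */
    };

    /* Call a hook only if the block provides one; missing hooks are
     * treated as trivially successful, so no-op stubs and explicit
     * ".hook = NULL" initializers become unnecessary. */
    static int call_optional(int (*hook)(void *), void *ctx)
    {
        return hook ? hook(ctx) : 0;
    }

    int main(void)
    {
        struct ip_funcs si_like = { .name = "si_common" }; /* no stubs */
        printf("%s sw_init -> %d\n", si_like.name,
               call_optional(si_like.sw_init, NULL));
        return 0;
    }

With this convention a missing hook is simply skipped, which is what makes the deletions above behavior-preserving.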
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 11db5b755832..47647a6083e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -457,9 +457,9 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int si_dma_early_init(void *handle)
+static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->sdma.num_instances = 2;
@@ -471,11 +471,11 @@ static int si_dma_early_init(void *handle)
return 0;
}
-static int si_dma_sw_init(void *handle)
+static int si_dma_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* DMA0 trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
@@ -506,9 +506,9 @@ static int si_dma_sw_init(void *handle)
return r;
}
-static int si_dma_sw_fini(void *handle)
+static int si_dma_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -517,39 +517,34 @@ static int si_dma_sw_fini(void *handle)
return 0;
}
-static int si_dma_hw_init(void *handle)
+static int si_dma_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return si_dma_start(adev);
}
-static int si_dma_hw_fini(void *handle)
+static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_dma_stop(adev);
+ si_dma_stop(ip_block->adev);
return 0;
}
-static int si_dma_suspend(void *handle)
+static int si_dma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_dma_hw_fini(adev);
+ return si_dma_hw_fini(ip_block);
}
-static int si_dma_resume(void *handle)
+static int si_dma_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_dma_hw_init(adev);
+ return si_dma_hw_init(ip_block);
}
static bool si_dma_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
u32 tmp = RREG32(SRBM_STATUS2);
if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
@@ -558,20 +553,20 @@ static bool si_dma_is_idle(void *handle)
return true;
}
-static int si_dma_wait_for_idle(void *handle)
+static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_dma_is_idle(handle))
+ if (si_dma_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int si_dma_soft_reset(void *handle)
+static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block)
{
DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
return 0;
@@ -696,7 +691,6 @@ static int si_dma_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
- .late_init = NULL,
.sw_init = si_dma_sw_init,
.sw_fini = si_dma_sw_fini,
.hw_init = si_dma_hw_init,
@@ -708,8 +702,6 @@ static const struct amd_ip_funcs si_dma_ip_funcs = {
.soft_reset = si_dma_soft_reset,
.set_clockgating_state = si_dma_set_clockgating_state,
.set_powergating_state = si_dma_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
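The si_dma diff also adjusts a call inside the wait loop: is_idle still takes a void *handle, and since wait_for_idle now receives an ip_block rather than the device pointer, it must hand is_idle the adev pointer explicitly; forwarding the ip_block as the old handle would be a type confusion. The loop itself is the driver's usual poll-with-timeout idiom. A standalone sketch of the same shape, with usleep() standing in for the kernel's udelay() and a flag standing in for the status register:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile bool hw_busy = true;

    static bool is_idle(void) { return !hw_busy; }

    /* Same shape as the driver's wait_for_idle hooks: poll, back off
     * one microsecond, give up after the timeout budget. */
    static int wait_for_idle(unsigned usec_timeout)
    {
        for (unsigned i = 0; i < usec_timeout; i++) {
            if (is_idle())
                return 0;
            usleep(1);           /* udelay(1) in the kernel */
        }
        return -110;             /* -ETIMEDOUT on Linux */
    }

    int main(void)
    {
        hw_busy = false;         /* pretend the engine drained */
        printf("wait_for_idle -> %d\n", wait_for_idle(100000));
        return 0;
    }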
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5237395e4fab..2ec1ebe4db11 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -156,19 +156,19 @@ static void si_ih_set_rptr(struct amdgpu_device *adev,
WREG32(IH_RB_RPTR, ih->rptr);
}
-static int si_ih_early_init(void *handle)
+static int si_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
si_ih_set_interrupt_funcs(adev);
return 0;
}
-static int si_ih_sw_init(void *handle)
+static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -177,43 +177,37 @@ static int si_ih_sw_init(void *handle)
return amdgpu_irq_init(adev);
}
-static int si_ih_sw_fini(void *handle)
+static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int si_ih_hw_init(void *handle)
+static int si_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return si_ih_irq_init(adev);
}
-static int si_ih_hw_fini(void *handle)
+static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_ih_irq_disable(adev);
+ si_ih_irq_disable(ip_block->adev);
return 0;
}
-static int si_ih_suspend(void *handle)
+static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_ih_hw_fini(adev);
+ return si_ih_hw_fini(ip_block);
}
-static int si_ih_resume(void *handle)
+static int si_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_ih_hw_init(adev);
+ return si_ih_hw_init(ip_block);
}
static bool si_ih_is_idle(void *handle)
@@ -227,22 +221,22 @@ static bool si_ih_is_idle(void *handle)
return true;
}
-static int si_ih_wait_for_idle(void *handle)
+static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_ih_is_idle(handle))
+ if (si_ih_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int si_ih_soft_reset(void *handle)
+static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(SRBM_STATUS);
@@ -284,7 +278,6 @@ static int si_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
- .late_init = NULL,
.sw_init = si_ih_sw_init,
.sw_fini = si_ih_sw_fini,
.hw_init = si_ih_hw_init,
@@ -296,8 +289,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
.soft_reset = si_ih_soft_reset,
.set_clockgating_state = si_ih_set_clockgating_state,
.set_powergating_state = si_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs si_ih_funcs = {
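A pattern repeated across si_ih and its siblings: suspend and resume collapse into one-line forwards of the typed ip_block to hw_fini/hw_init, and the local adev temporaries disappear because the callee now derives adev itself. A minimal userspace sketch of the shape, with the struct layouts trimmed to the essentials:

    #include <stdio.h>

    struct amdgpu_device { const char *name; };
    struct amdgpu_ip_block { struct amdgpu_device *adev; };

    static int hw_fini(struct amdgpu_ip_block *ip_block)
    {
        printf("%s: hw_fini\n", ip_block->adev->name);
        return 0;
    }

    /* suspend no longer unwraps the device; it simply forwards the
     * typed handle, so the cast-and-rewrap boilerplate goes away. */
    static int suspend(struct amdgpu_ip_block *ip_block)
    {
        return hw_fini(ip_block);
    }

    int main(void)
    {
        struct amdgpu_device dev = { "si_ih" };
        struct amdgpu_ip_block block = { &dev };
        return suspend(&block);
    }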
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 481217c32d85..9b01e074af47 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -81,15 +81,9 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -175,15 +169,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
}
@@ -193,15 +181,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -213,7 +195,7 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
index 0af648931df5..e70ebad3f9fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -80,15 +80,9 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_MES))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -186,15 +180,9 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -208,7 +196,7 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8d16dacdc172..93e44e7ee3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -587,11 +587,13 @@ static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
* 2) S3 suspend abort and TOS already launched.
*/
if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !adev->suspend_complete &&
- sol_reg)
+ sol_reg) {
+ adev->suspend_complete = false;
return true;
-
- return false;
+ } else {
+ adev->suspend_complete = true;
+ return false;
+ }
}
static int soc15_asic_reset(struct amdgpu_device *adev)
@@ -829,6 +831,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_RENOIR)
return true;
+ if (amdgpu_gmc_need_reset_on_init(adev))
+ return true;
+ if (amdgpu_psp_tos_reload_needed(adev))
+ return true;
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/
@@ -929,9 +935,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
.get_reg_state = &aqua_vanjaram_get_reg_state,
};
-static int soc15_common_early_init(void *handle)
+static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -1198,9 +1204,9 @@ static int soc15_common_early_init(void *handle)
return 0;
}
-static int soc15_common_late_init(void *handle)
+static int soc15_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
@@ -1213,9 +1219,9 @@ static int soc15_common_late_init(void *handle)
return 0;
}
-static int soc15_common_sw_init(void *handle)
+static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
@@ -1227,9 +1233,9 @@ static int soc15_common_sw_init(void *handle)
return 0;
}
-static int soc15_common_sw_fini(void *handle)
+static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->df.funcs &&
adev->df.funcs->sw_fini)
@@ -1251,9 +1257,9 @@ static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
}
}
-static int soc15_common_hw_init(void *handle)
+static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc15_program_aspm(adev);
@@ -1280,9 +1286,9 @@ static int soc15_common_hw_init(void *handle)
return 0;
}
-static int soc15_common_hw_fini(void *handle)
+static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc15_enable_doorbell_aperture
@@ -1295,7 +1301,12 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ /*
+ * For minimal init, late_init is not called, hence RAS irqs are not
+ * enabled.
+ */
if ((!amdgpu_sriov_vf(adev)) &&
+ (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
if (adev->nbio.ras &&
@@ -1309,22 +1320,20 @@ static int soc15_common_hw_fini(void *handle)
return 0;
}
-static int soc15_common_suspend(void *handle)
+static int soc15_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc15_common_hw_fini(adev);
+ return soc15_common_hw_fini(ip_block);
}
-static int soc15_common_resume(void *handle)
+static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (soc15_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
soc15_asic_reset(adev);
}
- return soc15_common_hw_init(adev);
+ return soc15_common_hw_init(ip_block);
}
static bool soc15_common_is_idle(void *handle)
@@ -1332,16 +1341,6 @@ static bool soc15_common_is_idle(void *handle)
return true;
}
-static int soc15_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc15_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
@@ -1492,11 +1491,7 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = {
.suspend = soc15_common_suspend,
.resume = soc15_common_resume,
.is_idle = soc15_common_is_idle,
- .wait_for_idle = soc15_common_wait_for_idle,
- .soft_reset = soc15_common_soft_reset,
.set_clockgating_state = soc15_common_set_clockgating_state,
.set_powergating_state = soc15_common_set_powergating_state,
.get_clockgating_state= soc15_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
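The soc15_need_reset_on_resume() change earlier in this file inverts the data flow around adev->suspend_complete: instead of consuming a flag set elsewhere, the function now records the outcome itself. An aborted S3 suspend (an APU in S3 with the TOS already launched) clears the flag and requests a reset, while every other combination marks the suspend complete. A small standalone mirror of the new control flow:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
        bool is_apu, in_s3, suspend_complete;
    };

    /* Mirrors the new logic: a TOS-already-launched APU in S3 means
     * the suspend was aborted, so record that and request a reset;
     * otherwise record the suspend as complete and skip the reset. */
    static bool need_reset_on_resume(struct dev_state *d, bool sol_reg)
    {
        if (d->is_apu && d->in_s3 && sol_reg) {
            d->suspend_complete = false;
            return true;
        }
        d->suspend_complete = true;
        return false;
    }

    int main(void)
    {
        struct dev_state d = { .is_apu = true, .in_s3 = true };
        printf("reset=%d complete=%d\n",
               need_reset_on_resume(&d, true), d.suspend_complete);
        return 0;
    }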
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d30ad7d56def..1c07ebdc0d1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -556,9 +556,9 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = {
.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
-static int soc21_common_early_init(void *handle)
+static int soc21_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -794,9 +794,9 @@ static int soc21_common_early_init(void *handle)
return 0;
}
-static int soc21_common_late_init(void *handle)
+static int soc21_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
@@ -832,9 +832,9 @@ static int soc21_common_late_init(void *handle)
return 0;
}
-static int soc21_common_sw_init(void *handle)
+static int soc21_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -842,14 +842,9 @@ static int soc21_common_sw_init(void *handle)
return 0;
}
-static int soc21_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int soc21_common_hw_init(void *handle)
+static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc21_program_aspm(adev);
@@ -867,9 +862,9 @@ static int soc21_common_hw_init(void *handle)
return 0;
}
-static int soc21_common_hw_fini(void *handle)
+static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc21_enable_doorbell_aperture
@@ -890,11 +885,9 @@ static int soc21_common_hw_fini(void *handle)
return 0;
}
-static int soc21_common_suspend(void *handle)
+static int soc21_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc21_common_hw_fini(adev);
+ return soc21_common_hw_fini(ip_block);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
@@ -917,16 +910,16 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
return false;
}
-static int soc21_common_resume(void *handle)
+static int soc21_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (soc21_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend aborted, resetting...");
soc21_asic_reset(adev);
}
- return soc21_common_hw_init(adev);
+ return soc21_common_hw_init(ip_block);
}
static bool soc21_common_is_idle(void *handle)
@@ -934,16 +927,6 @@ static bool soc21_common_is_idle(void *handle)
return true;
}
-static int soc21_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc21_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static int soc21_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1002,17 +985,12 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = {
.early_init = soc21_common_early_init,
.late_init = soc21_common_late_init,
.sw_init = soc21_common_sw_init,
- .sw_fini = soc21_common_sw_fini,
.hw_init = soc21_common_hw_init,
.hw_fini = soc21_common_hw_fini,
.suspend = soc21_common_suspend,
.resume = soc21_common_resume,
.is_idle = soc21_common_is_idle,
- .wait_for_idle = soc21_common_wait_for_idle,
- .soft_reset = soc21_common_soft_reset,
.set_clockgating_state = soc21_common_set_clockgating_state,
.set_powergating_state = soc21_common_set_powergating_state,
.get_clockgating_state = soc21_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index fd4c3d4f8387..3af10ef4b793 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -363,9 +363,9 @@ static const struct amdgpu_asic_funcs soc24_asic_funcs = {
.update_umd_stable_pstate = &soc24_update_umd_stable_pstate,
};
-static int soc24_common_early_init(void *handle)
+static int soc24_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -440,9 +440,9 @@ static int soc24_common_early_init(void *handle)
return 0;
}
-static int soc24_common_late_init(void *handle)
+static int soc24_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_get_irq(adev);
@@ -455,9 +455,9 @@ static int soc24_common_late_init(void *handle)
return 0;
}
-static int soc24_common_sw_init(void *handle)
+static int soc24_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -465,14 +465,9 @@ static int soc24_common_sw_init(void *handle)
return 0;
}
-static int soc24_common_sw_fini(void *handle)
+static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int soc24_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc24_program_aspm(adev);
@@ -494,9 +489,9 @@ static int soc24_common_hw_init(void *handle)
return 0;
}
-static int soc24_common_hw_fini(void *handle)
+static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc21_enable_doorbell_aperture
@@ -512,18 +507,14 @@ static int soc24_common_hw_fini(void *handle)
return 0;
}
-static int soc24_common_suspend(void *handle)
+static int soc24_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc24_common_hw_fini(adev);
+ return soc24_common_hw_fini(ip_block);
}
-static int soc24_common_resume(void *handle)
+static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc24_common_hw_init(adev);
+ return soc24_common_hw_init(ip_block);
}
static bool soc24_common_is_idle(void *handle)
@@ -531,16 +522,6 @@ static bool soc24_common_is_idle(void *handle)
return true;
}
-static int soc24_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc24_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static int soc24_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -595,14 +576,11 @@ static const struct amd_ip_funcs soc24_common_ip_funcs = {
.early_init = soc24_common_early_init,
.late_init = soc24_common_late_init,
.sw_init = soc24_common_sw_init,
- .sw_fini = soc24_common_sw_fini,
.hw_init = soc24_common_hw_init,
.hw_fini = soc24_common_hw_fini,
.suspend = soc24_common_suspend,
.resume = soc24_common_resume,
.is_idle = soc24_common_is_idle,
- .wait_for_idle = soc24_common_wait_for_idle,
- .soft_reset = soc24_common_soft_reset,
.set_clockgating_state = soc24_common_set_clockgating_state,
.set_powergating_state = soc24_common_set_powergating_state,
.get_clockgating_state = soc24_common_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 24d49d813607..5a04a6770138 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -283,9 +283,9 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev,
}
}
-static int tonga_ih_early_init(void *handle)
+static int tonga_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -297,10 +297,10 @@ static int tonga_ih_early_init(void *handle)
return 0;
}
-static int tonga_ih_sw_init(void *handle)
+static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
if (r)
@@ -314,9 +314,9 @@ static int tonga_ih_sw_init(void *handle)
return r;
}
-static int tonga_ih_sw_fini(void *handle)
+static int tonga_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -324,10 +324,10 @@ static int tonga_ih_sw_fini(void *handle)
return 0;
}
-static int tonga_ih_hw_init(void *handle)
+static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = tonga_ih_irq_init(adev);
if (r)
@@ -336,27 +336,21 @@ static int tonga_ih_hw_init(void *handle)
return 0;
}
-static int tonga_ih_hw_fini(void *handle)
+static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_ih_irq_disable(adev);
+ tonga_ih_irq_disable(ip_block->adev);
return 0;
}
-static int tonga_ih_suspend(void *handle)
+static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
-static int tonga_ih_resume(void *handle)
+static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return tonga_ih_hw_init(adev);
+ return tonga_ih_hw_init(ip_block);
}
static bool tonga_ih_is_idle(void *handle)
@@ -370,11 +364,11 @@ static bool tonga_ih_is_idle(void *handle)
return true;
}
-static int tonga_ih_wait_for_idle(void *handle)
+static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -386,9 +380,9 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static bool tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -405,29 +399,27 @@ static bool tonga_ih_check_soft_reset(void *handle)
}
}
-static int tonga_ih_pre_soft_reset(void *handle)
+static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->irq.srbm_soft_reset)
+ if (!ip_block->adev->irq.srbm_soft_reset)
return 0;
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
-static int tonga_ih_post_soft_reset(void *handle)
+static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->irq.srbm_soft_reset)
return 0;
- return tonga_ih_hw_init(adev);
+ return tonga_ih_hw_init(ip_block);
}
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->irq.srbm_soft_reset)
@@ -471,7 +463,6 @@ static int tonga_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
- .late_init = NULL,
.sw_init = tonga_ih_sw_init,
.sw_fini = tonga_ih_sw_fini,
.hw_init = tonga_ih_hw_init,
@@ -486,8 +477,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
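tonga_ih keeps the full three-phase soft-reset contract through the conversion: check_soft_reset() samples the status register and stashes a reset mask, and pre_soft_reset()/post_soft_reset() quiesce and re-init only when that mask is non-zero. A compact sketch of the phase ordering; the status bit and mask values are placeholders, not the real SRBM encodings:

    #include <stdbool.h>
    #include <stdio.h>

    struct ih_state {
        unsigned srbm_soft_reset;  /* stashed by the check phase */
    };

    /* Three-phase flow as in tonga_ih: check records what, if anything,
     * needs resetting; pre/post act only when the check found work. */
    static bool check_soft_reset(struct ih_state *s, unsigned status)
    {
        s->srbm_soft_reset = (status & 0x20) ? 0x40 : 0; /* bits assumed */
        return s->srbm_soft_reset != 0;
    }

    static int pre_soft_reset(struct ih_state *s)
    {
        if (!s->srbm_soft_reset)
            return 0;
        printf("quiesce IH before reset\n");   /* hw_fini in the driver */
        return 0;
    }

    static int post_soft_reset(struct ih_state *s)
    {
        if (!s->srbm_soft_reset)
            return 0;
        printf("re-init IH after reset\n");    /* hw_init in the driver */
        return 0;
    }

    int main(void)
    {
        struct ih_state s;

        if (check_soft_reset(&s, 0x20)) {
            pre_soft_reset(&s);
            post_soft_reset(&s);
        }
        return 0;
    }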
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 805d6662c88b..bdbca25d80c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -531,9 +531,9 @@ static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
}
-static int uvd_v3_1_early_init(void *handle)
+static int uvd_v3_1_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v3_1_set_ring_funcs(adev);
@@ -542,10 +542,10 @@ static int uvd_v3_1_early_init(void *handle)
return 0;
}
-static int uvd_v3_1_sw_init(void *handle)
+static int uvd_v3_1_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
void *ptr;
uint32_t ucode_len;
@@ -580,10 +580,10 @@ static int uvd_v3_1_sw_init(void *handle)
return r;
}
-static int uvd_v3_1_sw_fini(void *handle)
+static int uvd_v3_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -621,13 +621,13 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
/**
* uvd_v3_1_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v3_1_hw_init(void *handle)
+static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -688,13 +688,13 @@ done:
/**
* uvd_v3_1_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v3_1_hw_fini(void *handle)
+static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -704,17 +704,17 @@ static int uvd_v3_1_hw_fini(void *handle)
return 0;
}
-static int uvd_v3_1_prepare_suspend(void *handle)
+static int uvd_v3_1_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v3_1_suspend(void *handle)
+static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -740,23 +740,22 @@ static int uvd_v3_1_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v3_1_hw_fini(adev);
+ r = uvd_v3_1_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v3_1_resume(void *handle)
+static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v3_1_hw_init(adev);
+ return uvd_v3_1_hw_init(ip_block);
}
static bool uvd_v3_1_is_idle(void *handle)
@@ -766,10 +765,10 @@ static bool uvd_v3_1_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v3_1_wait_for_idle(void *handle)
+static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -778,9 +777,9 @@ static int uvd_v3_1_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v3_1_soft_reset(void *handle)
+static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v3_1_stop(adev);
@@ -806,7 +805,6 @@ static int uvd_v3_1_set_powergating_state(void *handle,
static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.name = "uvd_v3_1",
.early_init = uvd_v3_1_early_init,
- .late_init = NULL,
.sw_init = uvd_v3_1_sw_init,
.sw_fini = uvd_v3_1_sw_fini,
.hw_init = uvd_v3_1_hw_init,
@@ -819,8 +817,6 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.soft_reset = uvd_v3_1_soft_reset,
.set_clockgating_state = uvd_v3_1_set_clockgating_state,
.set_powergating_state = uvd_v3_1_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 3f19c606f4de..a836dc9cfcad 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
-static int uvd_v4_2_early_init(void *handle)
+static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev);
@@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle)
return 0;
}
-static int uvd_v4_2_sw_init(void *handle)
+static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* UVD TRAP */
@@ -130,10 +130,10 @@ static int uvd_v4_2_sw_init(void *handle)
return r;
}
-static int uvd_v4_2_sw_fini(void *handle)
+static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -147,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
/**
* uvd_v4_2_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v4_2_hw_init(void *handle)
+static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -202,13 +202,13 @@ done:
/**
* uvd_v4_2_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v4_2_hw_fini(void *handle)
+static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -218,17 +218,17 @@ static int uvd_v4_2_hw_fini(void *handle)
return 0;
}
-static int uvd_v4_2_prepare_suspend(void *handle)
+static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v4_2_suspend(void *handle)
+static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -254,23 +254,22 @@ static int uvd_v4_2_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v4_2_hw_fini(adev);
+ r = uvd_v4_2_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v4_2_resume(void *handle)
+static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v4_2_hw_init(adev);
+ return uvd_v4_2_hw_init(ip_block);
}
/**
@@ -666,10 +665,10 @@ static bool uvd_v4_2_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v4_2_wait_for_idle(void *handle)
+static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -678,9 +677,9 @@ static int uvd_v4_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v4_2_soft_reset(void *handle)
+static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v4_2_stop(adev);
@@ -756,7 +755,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
- .late_init = NULL,
.sw_init = uvd_v4_2_sw_init,
.sw_fini = uvd_v4_2_sw_fini,
.hw_init = uvd_v4_2_hw_init,
@@ -769,8 +767,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.soft_reset = uvd_v4_2_soft_reset,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index efd903c21d48..ab55fae3569e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -88,9 +88,9 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
-static int uvd_v5_0_early_init(void *handle)
+static int uvd_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev);
@@ -99,10 +99,10 @@ static int uvd_v5_0_early_init(void *handle)
return 0;
}
-static int uvd_v5_0_sw_init(void *handle)
+static int uvd_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* UVD TRAP */
@@ -128,10 +128,10 @@ static int uvd_v5_0_sw_init(void *handle)
return r;
}
-static int uvd_v5_0_sw_fini(void *handle)
+static int uvd_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -143,13 +143,13 @@ static int uvd_v5_0_sw_fini(void *handle)
/**
* uvd_v5_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v5_0_hw_init(void *handle)
+static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -200,13 +200,13 @@ done:
/**
* uvd_v5_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v5_0_hw_fini(void *handle)
+static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -216,17 +216,17 @@ static int uvd_v5_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v5_0_prepare_suspend(void *handle)
+static int uvd_v5_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v5_0_suspend(void *handle)
+static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -252,23 +252,22 @@ static int uvd_v5_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v5_0_hw_fini(adev);
+ r = uvd_v5_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v5_0_resume(void *handle)
+static int uvd_v5_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v5_0_hw_init(adev);
+ return uvd_v5_0_hw_init(ip_block);
}
/**
@@ -588,10 +587,10 @@ static bool uvd_v5_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v5_0_wait_for_idle(void *handle)
+static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -600,9 +599,9 @@ static int uvd_v5_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v5_0_soft_reset(void *handle)
+static int uvd_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v5_0_stop(adev);
@@ -796,10 +795,15 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
+ struct amdgpu_ip_block *ip_block;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+ if (!ip_block)
+ return -EINVAL;
if (enable) {
/* wait for STATUS to clear */
- if (uvd_v5_0_wait_for_idle(handle))
+ if (uvd_v5_0_wait_for_idle(ip_block))
return -EBUSY;
uvd_v5_0_enable_clock_gating(adev, true);
@@ -863,7 +867,6 @@ out:
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v5_0_sw_init,
.sw_fini = uvd_v5_0_sw_fini,
.hw_init = uvd_v5_0_hw_init,
@@ -877,8 +880,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
.get_clockgating_state = uvd_v5_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
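uvd_v5_0_set_clockgating_state() still arrives through the legacy void *handle signature, so before it can call the converted wait_for_idle() it must look up its own ip_block via amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) and bail out with -EINVAL if none is registered; uvd_v6_0 below gains the same guard. A plausible shape for such a lookup, assumed here for illustration since the real function lives in the amdgpu core:

    #include <stdio.h>

    enum ip_type { IP_GFX, IP_UVD, IP_MAX };

    struct ip_block { enum ip_type type; const char *name; };

    struct device {
        struct ip_block blocks[IP_MAX];
        int num_blocks;
    };

    /* Plausible shape of the lookup: a linear scan for the first
     * block of the requested type, NULL when the type is absent. */
    static struct ip_block *get_ip_block(struct device *dev, enum ip_type t)
    {
        for (int i = 0; i < dev->num_blocks; i++)
            if (dev->blocks[i].type == t)
                return &dev->blocks[i];
        return NULL;
    }

    int main(void)
    {
        struct device dev = {
            .blocks = { { IP_GFX, "gfx" }, { IP_UVD, "uvd_v5_0" } },
            .num_blocks = 2,
        };
        struct ip_block *blk = get_ip_block(&dev, IP_UVD);

        printf("%s\n", blk ? blk->name : "(none)");
        return 0;
    }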
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 495de5068455..39f8c3d3a135 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -354,9 +354,9 @@ error:
return r;
}
-static int uvd_v6_0_early_init(void *handle)
+static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) &&
@@ -375,11 +375,11 @@ static int uvd_v6_0_early_init(void *handle)
return 0;
}
-static int uvd_v6_0_sw_init(void *handle)
+static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
@@ -435,10 +435,10 @@ static int uvd_v6_0_sw_init(void *handle)
return r;
}
-static int uvd_v6_0_sw_fini(void *handle)
+static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -455,13 +455,13 @@ static int uvd_v6_0_sw_fini(void *handle)
/**
* uvd_v6_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v6_0_hw_init(void *handle)
+static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int i, r;
@@ -524,13 +524,13 @@ done:
/**
* uvd_v6_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v6_0_hw_fini(void *handle)
+static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -540,17 +540,17 @@ static int uvd_v6_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v6_0_prepare_suspend(void *handle)
+static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v6_0_suspend(void *handle)
+static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -576,23 +576,22 @@ static int uvd_v6_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v6_0_hw_fini(adev);
+ r = uvd_v6_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v6_0_resume(void *handle)
+static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v6_0_hw_init(adev);
+ return uvd_v6_0_hw_init(ip_block);
}
/**
@@ -1151,22 +1150,22 @@ static bool uvd_v6_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v6_0_wait_for_idle(void *handle)
+static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(handle))
+ if (uvd_v6_0_is_idle(adev))
return 0;
}
return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static bool uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1184,9 +1183,9 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
}
}
-static int uvd_v6_0_pre_soft_reset(void *handle)
+static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
@@ -1195,9 +1194,9 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
return 0;
}
-static int uvd_v6_0_soft_reset(void *handle)
+static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->uvd.inst->srbm_soft_reset)
@@ -1226,9 +1225,9 @@ static int uvd_v6_0_soft_reset(void *handle)
return 0;
}
-static int uvd_v6_0_post_soft_reset(void *handle)
+static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
@@ -1455,11 +1454,16 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ip_block *ip_block;
bool enable = (state == AMD_CG_STATE_GATE);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
+ if (!ip_block)
+ return -EINVAL;
+
if (enable) {
/* wait for STATUS to clear */
- if (uvd_v6_0_wait_for_idle(handle))
+ if (uvd_v6_0_wait_for_idle(ip_block))
return -EBUSY;
uvd_v6_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
@@ -1528,7 +1532,6 @@ out:
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v6_0_sw_init,
.sw_fini = uvd_v6_0_sw_fini,
.hw_init = uvd_v6_0_hw_init,
@@ -1545,8 +1548,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
.get_clockgating_state = uvd_v6_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6068b784dc69..079131aeb2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -361,9 +361,9 @@ error:
return r;
}
-static int uvd_v7_0_early_init(void *handle)
+static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->asic_type == CHIP_VEGA20) {
u32 harvest;
@@ -395,12 +395,12 @@ static int uvd_v7_0_early_init(void *handle)
return 0;
}
-static int uvd_v7_0_sw_init(void *handle)
+static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
@@ -487,10 +487,10 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-static int uvd_v7_0_sw_fini(void *handle)
+static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, j, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_virt_free_mm_table(adev);
@@ -510,13 +510,13 @@ static int uvd_v7_0_sw_fini(void *handle)
/**
* uvd_v7_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v7_0_hw_init(void *handle)
+static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
uint32_t tmp;
int i, j, r;
@@ -588,13 +588,13 @@ done:
/**
* uvd_v7_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v7_0_hw_fini(void *handle)
+static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -608,17 +608,17 @@ static int uvd_v7_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v7_0_prepare_suspend(void *handle)
+static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v7_0_suspend(void *handle)
+static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -644,23 +644,22 @@ static int uvd_v7_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v7_0_hw_fini(adev);
+ r = uvd_v7_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v7_0_resume(void *handle)
+static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v7_0_hw_init(adev);
+ return uvd_v7_0_hw_init(ip_block);
}
/**
@@ -1463,104 +1462,6 @@ static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-#if 0
-static bool uvd_v7_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
-}
-
-static int uvd_v7_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v7_0_is_idle(handle))
- return 0;
- }
- return -ETIMEDOUT;
-}
-
-#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static bool uvd_v7_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(mmSRBM_STATUS);
-
- if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
- REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
- (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
- AMDGPU_UVD_STATUS_BUSY_MASK))
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
-
- if (srbm_soft_reset) {
- adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
- return true;
- } else {
- adev->uvd.inst[ring->me].srbm_soft_reset = 0;
- return false;
- }
-}
-
-static int uvd_v7_0_pre_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
-
- uvd_v7_0_stop(adev);
- return 0;
-}
-
-static int uvd_v7_0_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
- srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
-
- if (srbm_soft_reset) {
- u32 tmp;
-
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
-
- return 0;
-}
-
-static int uvd_v7_0_post_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return uvd_v7_0_start(adev);
-}
-#endif
-
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1610,171 +1511,6 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-#if 0
-static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
-{
- uint32_t data, data1, data2, suvd_flags;
-
- data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
- data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
- data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
-
- data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
- UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
-
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
-
- data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
- (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
- (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
-
- data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
- UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
- UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
- UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
- UVD_CGC_CTRL__SYS_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MODE_MASK |
- UVD_CGC_CTRL__MPEG2_MODE_MASK |
- UVD_CGC_CTRL__REGS_MODE_MASK |
- UVD_CGC_CTRL__RBC_MODE_MASK |
- UVD_CGC_CTRL__LMI_MC_MODE_MASK |
- UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
- UVD_CGC_CTRL__IDCT_MODE_MASK |
- UVD_CGC_CTRL__MPRD_MODE_MASK |
- UVD_CGC_CTRL__MPC_MODE_MASK |
- UVD_CGC_CTRL__LBSI_MODE_MASK |
- UVD_CGC_CTRL__LRBBM_MODE_MASK |
- UVD_CGC_CTRL__WCB_MODE_MASK |
- UVD_CGC_CTRL__VCPU_MODE_MASK |
- UVD_CGC_CTRL__JPEG_MODE_MASK |
- UVD_CGC_CTRL__JPEG2_MODE_MASK |
- UVD_CGC_CTRL__SCPU_MODE_MASK);
- data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
-}
-
-static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
-{
- uint32_t data, data1, cgc_flags, suvd_flags;
-
- data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
- data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
-
- cgc_flags = UVD_CGC_GATE__SYS_MASK |
- UVD_CGC_GATE__UDEC_MASK |
- UVD_CGC_GATE__MPEG2_MASK |
- UVD_CGC_GATE__RBC_MASK |
- UVD_CGC_GATE__LMI_MC_MASK |
- UVD_CGC_GATE__IDCT_MASK |
- UVD_CGC_GATE__MPRD_MASK |
- UVD_CGC_GATE__MPC_MASK |
- UVD_CGC_GATE__LBSI_MASK |
- UVD_CGC_GATE__LRBBM_MASK |
- UVD_CGC_GATE__UDEC_RE_MASK |
- UVD_CGC_GATE__UDEC_CM_MASK |
- UVD_CGC_GATE__UDEC_IT_MASK |
- UVD_CGC_GATE__UDEC_DB_MASK |
- UVD_CGC_GATE__UDEC_MP_MASK |
- UVD_CGC_GATE__WCB_MASK |
- UVD_CGC_GATE__VCPU_MASK |
- UVD_CGC_GATE__SCPU_MASK |
- UVD_CGC_GATE__JPEG_MASK |
- UVD_CGC_GATE__JPEG2_MASK;
-
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
-
- data |= cgc_flags;
- data1 |= suvd_flags;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
-}
-
-static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
-static int uvd_v7_0_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE);
-
- uvd_v7_0_set_bypass_mode(adev, enable);
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
- if (enable) {
- /* disable HW gating and enable Sw gating */
- uvd_v7_0_set_sw_clock_gating(adev);
- } else {
- /* wait for STATUS to clear */
- if (uvd_v7_0_wait_for_idle(handle))
- return -EBUSY;
-
- /* enable HW gates because UVD is idle */
- /* uvd_v7_0_set_hw_clock_gating(adev); */
- }
-
- return 0;
-}
-
-static int uvd_v7_0_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- /* This doesn't actually powergate the UVD block.
- * That's done in the dpm code via the SMC. This
- * just re-inits the block as necessary. The actual
- * gating still happens in the dpm code. We should
- * revisit this when there is a cleaner line between
- * the smc and the hw blocks
- */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
-
- if (state == AMD_PG_STATE_GATE) {
- uvd_v7_0_stop(adev);
- return 0;
- } else {
- return uvd_v7_0_start(adev);
- }
-}
-#endif
-
static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1785,7 +1521,6 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
.name = "uvd_v7_0",
.early_init = uvd_v7_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v7_0_sw_init,
.sw_fini = uvd_v7_0_sw_fini,
.hw_init = uvd_v7_0_hw_init,
@@ -1793,12 +1528,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
.prepare_suspend = uvd_v7_0_prepare_suspend,
.suspend = uvd_v7_0_suspend,
.resume = uvd_v7_0_resume,
- .is_idle = NULL /* uvd_v7_0_is_idle */,
- .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
- .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
- .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
- .soft_reset = NULL /* uvd_v7_0_soft_reset */,
- .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
.set_clockgating_state = uvd_v7_0_set_clockgating_state,
.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
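The suspend/resume pairs above show the knock-on effect of the new signatures: hw_fini()/hw_init() now want the ip_block, so suspend and resume forward the ip_block they were handed instead of the bare adev. A pared-down sketch of that call chain, with hypothetical names standing in for the uvd_v7_0_* and amdgpu_uvd_* functions:

struct amdgpu_device;  /* opaque here */
struct amdgpu_ip_block { struct amdgpu_device *adev; };

/* stand-ins for amdgpu_uvd_suspend()/amdgpu_uvd_resume() */
static int core_suspend(struct amdgpu_device *adev) { (void)adev; return 0; }
static int core_resume(struct amdgpu_device *adev)  { (void)adev; return 0; }
static int hw_fini(struct amdgpu_ip_block *ip_block) { (void)ip_block; return 0; }
static int hw_init(struct amdgpu_ip_block *ip_block) { (void)ip_block; return 0; }

static int block_suspend(struct amdgpu_ip_block *ip_block)
{
	int r = hw_fini(ip_block);  /* forward the ip_block, not adev */

	if (r)
		return r;
	return core_suspend(ip_block->adev);  /* core helpers still take adev */
}

static int block_resume(struct amdgpu_ip_block *ip_block)
{
	int r = core_resume(ip_block->adev);

	if (r)
		return r;
	return hw_init(ip_block);  /* teardown order, reversed */
}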
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 66fada199bda..c1ed91b39415 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
-static int vce_v2_0_wait_for_idle(void *handle)
+static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(handle))
+ if (vce_v2_0_is_idle(adev))
return 0;
}
return -ETIMEDOUT;
@@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
int i;
int status;
+
if (vce_v2_0_lmi_clean(adev)) {
DRM_INFO("vce is not idle \n");
return 0;
}
- if (vce_v2_0_wait_for_idle(adev)) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v2_0_wait_for_idle(ip_block)) {
DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
@@ -398,9 +404,9 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
}
}
-static int vce_v2_0_early_init(void *handle)
+static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vce.num_rings = 2;
@@ -410,11 +416,11 @@ static int vce_v2_0_early_init(void *handle)
return 0;
}
-static int vce_v2_0_sw_init(void *handle)
+static int vce_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* VCE */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
@@ -444,10 +450,10 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
-static int vce_v2_0_sw_fini(void *handle)
+static int vce_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vce_suspend(adev);
if (r)
@@ -456,10 +462,10 @@ static int vce_v2_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v2_0_hw_init(void *handle)
+static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
@@ -475,19 +481,17 @@ static int vce_v2_0_hw_init(void *handle)
return 0;
}
-static int vce_v2_0_hw_fini(void *handle)
+static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cancel_delayed_work_sync(&adev->vce.idle_work);
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
return 0;
}
-static int vce_v2_0_suspend(void *handle)
+static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
@@ -513,28 +517,27 @@ static int vce_v2_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v2_0_hw_fini(adev);
+ r = vce_v2_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v2_0_resume(void *handle)
+static int vce_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vce_resume(adev);
+ r = amdgpu_vce_resume(ip_block->adev);
if (r)
return r;
- return vce_v2_0_hw_init(adev);
+ return vce_v2_0_hw_init(ip_block);
}
-static int vce_v2_0_soft_reset(void *handle)
+static int vce_v2_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
@@ -614,7 +617,6 @@ static int vce_v2_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
- .late_init = NULL,
.sw_init = vce_v2_0_sw_init,
.sw_fini = vce_v2_0_sw_fini,
.hw_init = vce_v2_0_hw_init,
@@ -626,8 +628,6 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.soft_reset = vce_v2_0_soft_reset,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
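vce_v2_0_stop() is the one awkward caller in this file: it only has an adev in hand, so it must look its own ip_block back up by type before it can call the converted wait_for_idle(), and it bails out with -EINVAL if the lookup comes back empty. A toy model of that lookup; the real helper is amdgpu_device_ip_get_ip_block(), and the flat array here is an assumption for illustration only:

#include <stddef.h>

enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_VCE };

struct amdgpu_device;  /* forward declaration */

struct amdgpu_ip_block {
	enum amd_ip_block_type type;
	struct amdgpu_device *adev;
};

struct amdgpu_device {
	int num_ip_blocks;
	struct amdgpu_ip_block ip_blocks[8];
};

/* walk the device's blocks, return the first of the given type or NULL */
static struct amdgpu_ip_block *
get_ip_block(struct amdgpu_device *adev, enum amd_ip_block_type type)
{
	for (int i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].type == type)
			return &adev->ip_blocks[i];
	return NULL;  /* callers like vce_v2_0_stop() turn this into -EINVAL */
}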
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 4bfba2931b08..6bb318a06f19 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -64,7 +64,7 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v3_0_wait_for_idle(void *handle);
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
/**
@@ -396,9 +396,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
}
}
-static int vce_v3_0_early_init(void *handle)
+static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -415,9 +415,9 @@ static int vce_v3_0_early_init(void *handle)
return 0;
}
-static int vce_v3_0_sw_init(void *handle)
+static int vce_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r, i;
@@ -453,10 +453,10 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
-static int vce_v3_0_sw_fini(void *handle)
+static int vce_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vce_suspend(adev);
if (r)
@@ -465,10 +465,10 @@ static int vce_v3_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v3_0_hw_init(void *handle)
+static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vce_v3_0_override_vce_clock_gating(adev, true);
@@ -485,14 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
return 0;
}
-static int vce_v3_0_hw_fini(void *handle)
+static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
- r = vce_v3_0_wait_for_idle(handle);
+ r = vce_v3_0_wait_for_idle(ip_block);
if (r)
return r;
@@ -500,10 +500,10 @@ static int vce_v3_0_hw_fini(void *handle)
return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}
-static int vce_v3_0_suspend(void *handle)
+static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -528,23 +528,22 @@ static int vce_v3_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v3_0_hw_fini(adev);
+ r = vce_v3_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v3_0_resume(void *handle)
+static int vce_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vce_resume(adev);
+ r = amdgpu_vce_resume(ip_block->adev);
if (r)
return r;
- return vce_v3_0_hw_init(adev);
+ return vce_v3_0_hw_init(ip_block);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
@@ -609,13 +608,13 @@ static bool vce_v3_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS2) & mask);
}
-static int vce_v3_0_wait_for_idle(void *handle)
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v3_0_is_idle(handle))
+ if (vce_v3_0_is_idle(adev))
return 0;
return -ETIMEDOUT;
@@ -627,9 +626,9 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static bool vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
/* According to VCE team , we should use VCE_STATUS instead
@@ -668,9 +667,9 @@ static bool vce_v3_0_check_soft_reset(void *handle)
}
}
-static int vce_v3_0_soft_reset(void *handle)
+static int vce_v3_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->vce.srbm_soft_reset)
@@ -699,29 +698,29 @@ static int vce_v3_0_soft_reset(void *handle)
return 0;
}
-static int vce_v3_0_pre_soft_reset(void *handle)
+static int vce_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
- return vce_v3_0_suspend(adev);
+ return vce_v3_0_suspend(ip_block);
}
-static int vce_v3_0_post_soft_reset(void *handle)
+static int vce_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
- return vce_v3_0_resume(adev);
+ return vce_v3_0_resume(ip_block);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -897,7 +896,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
- .late_init = NULL,
.sw_init = vce_v3_0_sw_init,
.sw_fini = vce_v3_0_sw_fini,
.hw_init = vce_v3_0_hw_init,
@@ -913,8 +911,6 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
.get_clockgating_state = vce_v3_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
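The wait_for_idle() bodies that survive the conversion (vce_v3_0 here, and the VCN versions below) are all the same bounded poll: retry the idle check up to adev->usec_timeout times, else report -ETIMEDOUT. A self-contained sketch of that shape, with a stub in place of the real RREG32 status read:

#include <errno.h>
#include <stdbool.h>

struct amdgpu_device { int usec_timeout; };

/* stub for the SRBM_STATUS2 / UVD_STATUS register read */
static bool block_is_idle(struct amdgpu_device *adev)
{
	(void)adev;
	return true;
}

static int wait_for_idle(struct amdgpu_device *adev)
{
	for (int i = 0; i < adev->usec_timeout; i++)
		if (block_is_idle(adev))
			return 0;
	return -ETIMEDOUT;  /* callers typically map this to -EBUSY or abort */
}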
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 0748bf44c880..79ee555768a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -407,9 +407,9 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
return 0;
}
-static int vce_v4_0_early_init(void *handle)
+static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
@@ -422,9 +422,9 @@ static int vce_v4_0_early_init(void *handle)
return 0;
}
-static int vce_v4_0_sw_init(void *handle)
+static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
unsigned size;
@@ -493,10 +493,10 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
-static int vce_v4_0_sw_fini(void *handle)
+static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* free MM table */
amdgpu_virt_free_mm_table(adev);
@@ -513,10 +513,10 @@ static int vce_v4_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v4_0_hw_init(void *handle)
+static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
r = vce_v4_0_sriov_start(adev);
@@ -536,14 +536,14 @@ static int vce_v4_0_hw_init(void *handle)
return 0;
}
-static int vce_v4_0_hw_fini(void *handle)
+static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
if (!amdgpu_sriov_vf(adev)) {
- /* vce_v4_0_wait_for_idle(handle); */
+ /* vce_v4_0_wait_for_idle(ip_block); */
vce_v4_0_stop(adev);
} else {
/* full access mode, so don't touch any VCE register */
@@ -553,9 +553,9 @@ static int vce_v4_0_hw_fini(void *handle)
return 0;
}
-static int vce_v4_0_suspend(void *handle)
+static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
@@ -594,16 +594,16 @@ static int vce_v4_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v4_0_hw_fini(adev);
+ r = vce_v4_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v4_0_resume(void *handle)
+static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
@@ -624,7 +624,7 @@ static int vce_v4_0_resume(void *handle)
return r;
}
- return vce_v4_0_hw_init(adev);
+ return vce_v4_0_hw_init(ip_block);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@@ -691,273 +691,6 @@ static int vce_v4_0_set_clockgating_state(void *handle,
return 0;
}
-#if 0
-static bool vce_v4_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 mask = 0;
-
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
-
- return !(RREG32(mmSRBM_STATUS2) & mask);
-}
-
-static int vce_v4_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v4_0_is_idle(handle))
- return 0;
-
- return -ETIMEDOUT;
-}
-
-#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
-#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
-#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
-#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
- VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-
-static bool vce_v4_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset = 0;
-
- /* According to VCE team , we should use VCE_STATUS instead
- * SRBM_STATUS.VCE_BUSY bit for busy status checking.
- * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
- * instance's registers are accessed
- * (0 for 1st instance, 10 for 2nd instance).
- *
- *VCE_STATUS
- *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB |
- *|----+----+-----------+----+----+----+----------+---------+----|
- *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0|
- *
- * VCE team suggest use bit 3--bit 6 for busy status check
- */
- mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
- if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
- }
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
- if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
- }
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (srbm_soft_reset) {
- adev->vce.srbm_soft_reset = srbm_soft_reset;
- return true;
- } else {
- adev->vce.srbm_soft_reset = 0;
- return false;
- }
-}
-
-static int vce_v4_0_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
- srbm_soft_reset = adev->vce.srbm_soft_reset;
-
- if (srbm_soft_reset) {
- u32 tmp;
-
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
-
- return 0;
-}
-
-static int vce_v4_0_pre_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return vce_v4_0_suspend(adev);
-}
-
-
-static int vce_v4_0_post_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return vce_v4_0_resume(adev);
-}
-
-static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
-{
- u32 tmp, data;
-
- tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
- if (override)
- data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
- else
- data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
- if (tmp != data)
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
-}
-
-static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
- bool gated)
-{
- u32 data;
-
- /* Set Override to disable Clock Gating */
- vce_v4_0_override_vce_clock_gating(adev, true);
-
- /* This function enables MGCG which is controlled by firmware.
- With the clocks in the gated state the core is still
- accessible but the firmware will throttle the clocks on the
- fly as necessary.
- */
- if (gated) {
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
- data |= 0x1ff;
- data &= ~0xef0000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
- data |= 0x3ff000;
- data &= ~0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
- data |= 0x2;
- data &= ~0x00010000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
- data |= 0x37f;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
- data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
- } else {
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
- data &= ~0x80010;
- data |= 0xe70008;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
- data |= 0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
- data |= 0x10000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
- data &= ~0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
- data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8);
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
- }
- vce_v4_0_override_vce_clock_gating(adev, false);
-}
-
-static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
- else
- tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-static int vce_v4_0_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE);
- int i;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_TONGA) ||
- (adev->asic_type == CHIP_FIJI))
- vce_v4_0_set_bypass_mode(adev, enable);
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
- return 0;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < 2; i++) {
- /* Program VCE Instance 0 or 1 if not harvested */
- if (adev->vce.harvest_config & (1 << i))
- continue;
-
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
-
- if (enable) {
- /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
- uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A);
- data &= ~(0xf | 0xff0);
- data |= ((0x0 << 0) | (0x04 << 4));
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data);
-
- /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING);
- data &= ~(0xf | 0xff0);
- data |= ((0x0 << 0) | (0x04 << 4));
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
- }
-
- vce_v4_0_set_vce_sw_clock_gating(adev, enable);
- }
-
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-#endif
-
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -1076,19 +809,12 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.name = "vce_v4_0",
.early_init = vce_v4_0_early_init,
- .late_init = NULL,
.sw_init = vce_v4_0_sw_init,
.sw_fini = vce_v4_0_sw_fini,
.hw_init = vce_v4_0_hw_init,
.hw_fini = vce_v4_0_hw_fini,
.suspend = vce_v4_0_suspend,
.resume = vce_v4_0_resume,
- .is_idle = NULL /* vce_v4_0_is_idle */,
- .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
- .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
- .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
- .soft_reset = NULL /* vce_v4_0_soft_reset */,
- .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
.set_powergating_state = vce_v4_0_set_powergating_state,
};
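The soft_reset bodies deleted in this file (and the live one kept in vce_v3_0) all perform the same register dance: OR the reset bits into SRBM_SOFT_RESET, read the register back to post the write, wait ~50us, clear the bits, read back, wait again. A hedged sketch with stand-ins for RREG32/WREG32/udelay:

#include <stdint.h>

static uint32_t srbm_soft_reset;  /* stand-in for the mmSRBM_SOFT_RESET MMIO register */

static uint32_t rreg(void)    { return srbm_soft_reset; }
static void wreg(uint32_t v)  { srbm_soft_reset = v; }
static void udelay_us(int us) { (void)us; /* busy-waits in the real kernel */ }

static void srbm_toggle_reset(uint32_t reset_bits)
{
	uint32_t tmp = rreg();

	tmp |= reset_bits;   /* assert reset for the selected block(s) */
	wreg(tmp);
	tmp = rreg();        /* read back so the write has landed */
	udelay_us(50);

	tmp &= ~reset_bits;  /* de-assert and let things settle */
	wreg(tmp);
	tmp = rreg();
	udelay_us(50);
}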
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index ecdfbfefd66a..10e99c926fb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -95,14 +95,14 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
* vcn_v1_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v1_0_early_init(void *handle)
+static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vcn.num_enc_rings = 2;
@@ -110,7 +110,7 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_enc_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
- jpeg_v1_0_early_init(handle);
+ jpeg_v1_0_early_init(ip_block);
return amdgpu_vcn_early_init(adev);
}
@@ -118,17 +118,17 @@ static int vcn_v1_0_early_init(void *handle)
/**
* vcn_v1_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v1_0_sw_init(void *handle)
+static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -197,7 +197,7 @@ static int vcn_v1_0_sw_init(void *handle)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
}
- r = jpeg_v1_0_sw_init(handle);
+ r = jpeg_v1_0_sw_init(ip_block);
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
@@ -213,20 +213,20 @@ static int vcn_v1_0_sw_init(void *handle)
/**
* vcn_v1_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v1_0_sw_fini(void *handle)
+static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
- jpeg_v1_0_sw_fini(handle);
+ jpeg_v1_0_sw_fini(ip_block);
r = amdgpu_vcn_sw_fini(adev);
@@ -238,13 +238,13 @@ static int vcn_v1_0_sw_fini(void *handle)
/**
* vcn_v1_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v1_0_hw_init(void *handle)
+static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
@@ -268,13 +268,13 @@ static int vcn_v1_0_hw_init(void *handle)
/**
* vcn_v1_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v1_0_hw_fini(void *handle)
+static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -290,14 +290,14 @@ static int vcn_v1_0_hw_fini(void *handle)
/**
* vcn_v1_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v1_0_suspend(void *handle)
+static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool idle_work_unexecuted;
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -306,7 +306,7 @@ static int vcn_v1_0_suspend(void *handle)
amdgpu_dpm_enable_uvd(adev, false);
}
- r = vcn_v1_0_hw_fini(adev);
+ r = vcn_v1_0_hw_fini(ip_block);
if (r)
return r;
@@ -318,20 +318,19 @@ static int vcn_v1_0_suspend(void *handle)
/**
* vcn_v1_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v1_0_resume(void *handle)
+static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v1_0_hw_init(adev);
+ r = vcn_v1_0_hw_init(ip_block);
return r;
}
@@ -1384,9 +1383,9 @@ static bool vcn_v1_0_is_idle(void *handle)
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
-static int vcn_v1_0_wait_for_idle(void *handle)
+static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
@@ -1925,9 +1924,9 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
-static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
uint32_t inst_off, is_powered;
@@ -1957,9 +1956,9 @@ static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v1_0_dump_ip_state(void *handle)
+static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1988,7 +1987,6 @@ static void vcn_v1_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v1_0_sw_init,
.sw_fini = vcn_v1_0_sw_fini,
.hw_init = vcn_v1_0_hw_init,
@@ -1997,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.resume = vcn_v1_0_resume,
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
- .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
- .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
- .soft_reset = NULL /* vcn_v1_0_soft_reset */,
- .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
.dump_ip_state = vcn_v1_0_dump_ip_state,
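vcn_v1_0 keeps its dump_ip_state/print_ip_state hooks, which move to the ip_block signature like everything else. The underlying pattern: sw_init kcalloc()s one buffer of num_vcn_inst * reg_count words, dump snapshots the registers into it, and print replays the buffer through a drm_printer. A toy version under those assumptions, with printf standing in for drm_printf and a fake register read:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_INST  2  /* adev->vcn.num_vcn_inst in the driver */
#define REG_COUNT 4  /* ARRAY_SIZE(vcn_reg_list_1_0) in the driver */

static uint32_t read_reg(int inst, int reg)  /* stub for RREG32_SOC15() */
{
	return (uint32_t)(inst * 0x100 + reg);
}

static uint32_t *ip_dump;  /* allocated once at sw_init time */

static void dump_ip_state(void)
{
	for (int i = 0; i < NUM_INST; i++)
		for (int j = 0; j < REG_COUNT; j++)
			ip_dump[i * REG_COUNT + j] = read_reg(i, j);
}

static void print_ip_state(void)
{
	for (int i = 0; i < NUM_INST; i++)
		for (int j = 0; j < REG_COUNT; j++)
			printf("inst%d reg%d: 0x%08x\n",
			       i, j, ip_dump[i * REG_COUNT + j]);
}

int main(void)
{
	ip_dump = calloc(NUM_INST * REG_COUNT, sizeof(*ip_dump));
	if (!ip_dump)
		return 1;
	dump_ip_state();
	print_ip_state();
	free(ip_dump);
	return 0;
}

Keeping dump and print separate lets the snapshot be taken while the hardware state is still interesting and replayed later, e.g. from a coredump.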
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index bfd067e2d2f1..e0322cbca3ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -100,14 +100,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v2_0_early_init(void *handle)
+static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
adev->vcn.num_enc_rings = 1;
@@ -124,17 +124,17 @@ static int vcn_v2_0_early_init(void *handle)
/**
* vcn_v2_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v2_0_sw_init(void *handle)
+static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
@@ -237,14 +237,14 @@ static int vcn_v2_0_sw_init(void *handle)
/**
* vcn_v2_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v2_0_sw_fini(void *handle)
+static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -268,13 +268,13 @@ static int vcn_v2_0_sw_fini(void *handle)
/**
* vcn_v2_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v2_0_hw_init(void *handle)
+static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
@@ -305,13 +305,13 @@ static int vcn_v2_0_hw_init(void *handle)
/**
* vcn_v2_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_0_hw_fini(void *handle)
+static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -326,20 +326,19 @@ static int vcn_v2_0_hw_fini(void *handle)
/**
* vcn_v2_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v2_0_suspend(void *handle)
+static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v2_0_hw_fini(adev);
+ r = vcn_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -347,20 +346,19 @@ static int vcn_v2_0_suspend(void *handle)
/**
* vcn_v2_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v2_0_resume(void *handle)
+static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v2_0_hw_init(adev);
+ r = vcn_v2_0_hw_init(ip_block);
return r;
}
@@ -1326,9 +1324,9 @@ static bool vcn_v2_0_is_idle(void *handle)
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
-static int vcn_v2_0_wait_for_idle(void *handle)
+static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
@@ -2034,9 +2032,9 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
uint32_t inst_off, is_powered;
@@ -2066,9 +2064,9 @@ static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v2_0_dump_ip_state(void *handle)
+static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2097,7 +2095,6 @@ static void vcn_v2_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_0_sw_init,
.sw_fini = vcn_v2_0_sw_fini,
.hw_init = vcn_v2_0_hw_init,
@@ -2106,10 +2103,6 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.resume = vcn_v2_0_resume,
.is_idle = vcn_v2_0_is_idle,
.wait_for_idle = vcn_v2_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
.dump_ip_state = vcn_v2_0_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 04e9e806e318..6aa08281d094 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -110,14 +110,14 @@ static int amdgpu_ih_clientid_vcns[] = {
/**
* vcn_v2_5_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v2_5_early_init(void *handle)
+static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
@@ -151,17 +151,17 @@ static int vcn_v2_5_early_init(void *handle)
/**
* vcn_v2_5_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v2_5_sw_init(void *handle)
+static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
if (adev->vcn.harvest_config & (1 << j))
@@ -295,14 +295,14 @@ static int vcn_v2_5_sw_init(void *handle)
/**
* vcn_v2_5_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v2_5_sw_fini(void *handle)
+static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -333,13 +333,13 @@ static int vcn_v2_5_sw_fini(void *handle)
/**
* vcn_v2_5_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v2_5_hw_init(void *handle)
+static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r = 0;
@@ -381,13 +381,13 @@ static int vcn_v2_5_hw_init(void *handle)
/**
* vcn_v2_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_5_hw_fini(void *handle)
+static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -411,20 +411,19 @@ static int vcn_v2_5_hw_fini(void *handle)
/**
* vcn_v2_5_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v2_5_suspend(void *handle)
+static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v2_5_hw_fini(adev);
+ r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -432,20 +431,19 @@ static int vcn_v2_5_suspend(void *handle)
/**
* vcn_v2_5_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v2_5_resume(void *handle)
+static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v2_5_hw_init(adev);
+ r = vcn_v2_5_hw_init(ip_block);
return r;
}
@@ -1786,9 +1784,9 @@ static bool vcn_v2_5_is_idle(void *handle)
return ret;
}
-static int vcn_v2_5_wait_for_idle(void *handle)
+static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1926,9 +1924,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
uint32_t inst_off, is_powered;
@@ -1958,9 +1956,9 @@ static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v2_5_dump_ip_state(void *handle)
+static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1989,7 +1987,6 @@ static void vcn_v2_5_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
@@ -1998,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
@@ -2011,7 +2004,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.name = "vcn_v2_6",
.early_init = vcn_v2_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
@@ -2020,10 +2012,6 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
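vcn_v2_5 and vcn_v3_0 are multi-instance blocks, so nearly every converted loop above is guarded by the harvest bitmask: instance j is skipped whenever bit j of adev->vcn.harvest_config is set. A small sketch of that iteration pattern (the callback shape is invented for illustration):

struct vcn_state {
	int num_vcn_inst;
	unsigned int harvest_config;  /* bit j set => instance j is fused off */
};

static void for_each_live_instance(const struct vcn_state *vcn,
				   void (*fn)(int inst))
{
	for (int j = 0; j < vcn->num_vcn_inst; j++) {
		if (vcn->harvest_config & (1u << j))
			continue;  /* harvested: no registers to touch */
		fn(j);
	}
}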
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 65dd68b32280..6732ad7f16f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -116,14 +116,14 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v3_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v3_0_early_init(void *handle)
+static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
@@ -153,18 +153,18 @@ static int vcn_v3_0_early_init(void *handle)
/**
* vcn_v3_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v3_0_sw_init(void *handle)
+static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -299,13 +299,13 @@ static int vcn_v3_0_sw_init(void *handle)
/**
* vcn_v3_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v3_0_sw_fini(void *handle)
+static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -338,13 +338,13 @@ static int vcn_v3_0_sw_fini(void *handle)
/**
* vcn_v3_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v3_0_hw_init(void *handle)
+static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r;
@@ -413,13 +413,13 @@ static int vcn_v3_0_hw_init(void *handle)
/**
* vcn_v3_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v3_0_hw_fini(void *handle)
+static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -443,20 +443,19 @@ static int vcn_v3_0_hw_fini(void *handle)
/**
* vcn_v3_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v3_0_suspend(void *handle)
+static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v3_0_hw_fini(adev);
+ r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -464,20 +463,19 @@ static int vcn_v3_0_suspend(void *handle)
/**
* vcn_v3_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v3_0_resume(void *handle)
+static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v3_0_hw_init(adev);
+ r = vcn_v3_0_hw_init(ip_block);
return r;
}
@@ -2116,9 +2114,9 @@ static bool vcn_v3_0_is_idle(void *handle)
return ret;
}
-static int vcn_v3_0_wait_for_idle(void *handle)
+static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2251,9 +2249,9 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
uint32_t inst_off;
@@ -2284,9 +2282,9 @@ static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v3_0_dump_ip_state(void *handle)
+static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2315,7 +2313,6 @@ static void vcn_v3_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v3_0_sw_init,
.sw_fini = vcn_v3_0_sw_fini,
.hw_init = vcn_v3_0_hw_init,
@@ -2324,10 +2321,6 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.resume = vcn_v3_0_resume,
.is_idle = vcn_v3_0_is_idle,
.wait_for_idle = vcn_v3_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
.dump_ip_state = vcn_v3_0_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 26c6f10a8c8f..5512259cac79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -106,14 +106,14 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v4_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v4_0_early_init(void *handle)
+static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
if (amdgpu_sriov_vf(adev)) {
@@ -164,14 +164,14 @@ static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_sw_init(void *handle)
+static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t *ptr;
@@ -253,13 +253,13 @@ static int vcn_v4_0_sw_init(void *handle)
/**
* vcn_v4_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_sw_fini(void *handle)
+static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -294,13 +294,13 @@ static int vcn_v4_0_sw_fini(void *handle)
/**
* vcn_v4_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_hw_init(void *handle)
+static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -341,13 +341,13 @@ static int vcn_v4_0_hw_init(void *handle)
/**
* vcn_v4_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_hw_fini(void *handle)
+static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -372,20 +372,19 @@ static int vcn_v4_0_hw_fini(void *handle)
/**
* vcn_v4_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_suspend(void *handle)
+static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v4_0_hw_fini(adev);
+ r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -393,20 +392,19 @@ static int vcn_v4_0_suspend(void *handle)
/**
* vcn_v4_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_resume(void *handle)
+static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_hw_init(adev);
+ r = vcn_v4_0_hw_init(ip_block);
return r;
}
@@ -1975,13 +1973,13 @@ static bool vcn_v4_0_is_idle(void *handle)
/**
* vcn_v4_0_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_wait_for_idle(void *handle)
+static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2158,9 +2156,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t inst_off, is_powered;
@@ -2190,9 +2188,9 @@ static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_dump_ip_state(void *handle)
+static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2222,7 +2220,6 @@ static void vcn_v4_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_sw_init,
.sw_fini = vcn_v4_0_sw_fini,
.hw_init = vcn_v4_0_hw_init,
@@ -2231,10 +2228,6 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.resume = vcn_v4_0_resume,
.is_idle = vcn_v4_0_is_idle,
.wait_for_idle = vcn_v4_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
.dump_ip_state = vcn_v4_0_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 0fda70336300..0d5c94bfc0ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -98,13 +98,13 @@ static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
/**
* vcn_v4_0_3_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int vcn_v4_0_3_early_init(void *handle)
+static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -119,13 +119,13 @@ static int vcn_v4_0_3_early_init(void *handle)
/**
* vcn_v4_0_3_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_3_sw_init(void *handle)
+static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
@@ -212,13 +212,13 @@ static int vcn_v4_0_3_sw_init(void *handle)
/**
* vcn_v4_0_3_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_3_sw_fini(void *handle)
+static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(&adev->ddev, &idx)) {
@@ -249,13 +249,13 @@ static int vcn_v4_0_3_sw_fini(void *handle)
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_3_hw_init(void *handle)
+static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
@@ -308,13 +308,13 @@ static int vcn_v4_0_3_hw_init(void *handle)
/**
* vcn_v4_0_3_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_3_hw_fini(void *handle)
+static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -327,20 +327,19 @@ static int vcn_v4_0_3_hw_fini(void *handle)
/**
* vcn_v4_0_3_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_3_suspend(void *handle)
+static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = vcn_v4_0_3_hw_fini(adev);
+ r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -348,20 +347,19 @@ static int vcn_v4_0_3_suspend(void *handle)
/**
* vcn_v4_0_3_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_3_resume(void *handle)
+static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_3_hw_init(adev);
+ r = vcn_v4_0_3_hw_init(ip_block);
return r;
}
@@ -1567,13 +1565,13 @@ static bool vcn_v4_0_3_is_idle(void *handle)
/**
* vcn_v4_0_3_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_3_wait_for_idle(void *handle)
+static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1733,9 +1731,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
uint32_t inst_off, is_powered;
@@ -1765,9 +1763,9 @@ static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_3_dump_ip_state(void *handle)
+static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off, inst_id;
@@ -1798,7 +1796,6 @@ static void vcn_v4_0_3_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1807,10 +1804,6 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.resume = vcn_v4_0_3_resume,
.is_idle = vcn_v4_0_3_is_idle,
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
.dump_ip_state = vcn_v4_0_3_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 9d4f5352a62c..71961fb3f7ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -104,14 +104,14 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v4_0_5_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v4_0_5_early_init(void *handle)
+static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -124,14 +124,14 @@ static int vcn_v4_0_5_early_init(void *handle)
/**
* vcn_v4_0_5_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_5_sw_init(void *handle)
+static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t *ptr;
@@ -220,13 +220,13 @@ static int vcn_v4_0_5_sw_init(void *handle)
/**
* vcn_v4_0_5_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_5_sw_fini(void *handle)
+static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -261,13 +261,13 @@ static int vcn_v4_0_5_sw_fini(void *handle)
/**
* vcn_v4_0_5_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_5_hw_init(void *handle)
+static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -291,13 +291,13 @@ static int vcn_v4_0_5_hw_init(void *handle)
/**
* vcn_v4_0_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_5_hw_fini(void *handle)
+static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -320,20 +320,19 @@ static int vcn_v4_0_5_hw_fini(void *handle)
/**
* vcn_v4_0_5_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_5_suspend(void *handle)
+static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v4_0_5_hw_fini(adev);
+ r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -341,20 +340,19 @@ static int vcn_v4_0_5_suspend(void *handle)
/**
* vcn_v4_0_5_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_5_resume(void *handle)
+static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_5_hw_init(adev);
+ r = vcn_v4_0_5_hw_init(ip_block);
return r;
}
@@ -1469,13 +1467,13 @@ static bool vcn_v4_0_5_is_idle(void *handle)
/**
* vcn_v4_0_5_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_5_wait_for_idle(void *handle)
+static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1616,9 +1614,9 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t inst_off, is_powered;
@@ -1648,9 +1646,9 @@ static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_5_dump_ip_state(void *handle)
+static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1680,7 +1678,6 @@ static void vcn_v4_0_5_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_5_sw_init,
.sw_fini = vcn_v4_0_5_sw_fini,
.hw_init = vcn_v4_0_5_hw_init,
@@ -1689,10 +1686,6 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.resume = vcn_v4_0_5_resume,
.is_idle = vcn_v4_0_5_is_idle,
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
.dump_ip_state = vcn_v4_0_5_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index c305386358b4..fe2cc1a80c13 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -87,14 +87,14 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v5_0_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v5_0_0_early_init(void *handle)
+static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -108,14 +108,14 @@ static int vcn_v5_0_0_early_init(void *handle)
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v5_0_0_sw_init(void *handle)
+static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
uint32_t *ptr;
@@ -187,13 +187,13 @@ static int vcn_v5_0_0_sw_init(void *handle)
/**
* vcn_v5_0_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v5_0_0_sw_fini(void *handle)
+static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -225,13 +225,13 @@ static int vcn_v5_0_0_sw_fini(void *handle)
/**
* vcn_v5_0_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v5_0_0_hw_init(void *handle)
+static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -255,13 +255,13 @@ static int vcn_v5_0_0_hw_init(void *handle)
/**
* vcn_v5_0_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v5_0_0_hw_fini(void *handle)
+static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -284,20 +284,19 @@ static int vcn_v5_0_0_hw_fini(void *handle)
/**
* vcn_v5_0_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v5_0_0_suspend(void *handle)
+static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v5_0_0_hw_fini(adev);
+ r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -305,20 +304,19 @@ static int vcn_v5_0_0_suspend(void *handle)
/**
* vcn_v5_0_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v5_0_0_resume(void *handle)
+static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v5_0_0_hw_init(adev);
+ r = vcn_v5_0_0_hw_init(ip_block);
return r;
}
@@ -1196,13 +1194,13 @@ static bool vcn_v5_0_0_is_idle(void *handle)
/**
* vcn_v5_0_0_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v5_0_0_wait_for_idle(void *handle)
+static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1343,9 +1341,9 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
uint32_t inst_off, is_powered;
@@ -1375,9 +1373,9 @@ static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v5_0_dump_ip_state(void *handle)
+static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1406,7 +1404,6 @@ static void vcn_v5_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v5_0_0_sw_init,
.sw_fini = vcn_v5_0_0_sw_fini,
.hw_init = vcn_v5_0_0_hw_init,
@@ -1415,10 +1412,6 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.resume = vcn_v5_0_0_resume,
.is_idle = vcn_v5_0_0_is_idle,
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
.dump_ip_state = vcn_v5_0_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index bf68e18e3824..0fedadd0a6a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -472,18 +472,18 @@ static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}
-static int vega10_ih_early_init(void *handle)
+static int vega10_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega10_ih_set_interrupt_funcs(adev);
vega10_ih_set_self_irq_funcs(adev);
return 0;
}
-static int vega10_ih_sw_init(void *handle)
+static int vega10_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -525,43 +525,35 @@ static int vega10_ih_sw_init(void *handle)
return r;
}
-static int vega10_ih_sw_fini(void *handle)
+static int vega10_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int vega10_ih_hw_init(void *handle)
+static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_irq_init(adev);
+ return vega10_ih_irq_init(ip_block->adev);
}
-static int vega10_ih_hw_fini(void *handle)
+static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega10_ih_irq_disable(adev);
+ vega10_ih_irq_disable(ip_block->adev);
return 0;
}
-static int vega10_ih_suspend(void *handle)
+static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_hw_fini(adev);
+ return vega10_ih_hw_fini(ip_block);
}
-static int vega10_ih_resume(void *handle)
+static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_hw_init(adev);
+ return vega10_ih_hw_init(ip_block);
}
static bool vega10_ih_is_idle(void *handle)
@@ -570,13 +562,13 @@ static bool vega10_ih_is_idle(void *handle)
return true;
}
-static int vega10_ih_wait_for_idle(void *handle)
+static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int vega10_ih_soft_reset(void *handle)
+static int vega10_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -633,7 +625,6 @@ static int vega10_ih_set_powergating_state(void *handle,
const struct amd_ip_funcs vega10_ih_ip_funcs = {
.name = "vega10_ih",
.early_init = vega10_ih_early_init,
- .late_init = NULL,
.sw_init = vega10_ih_sw_init,
.sw_fini = vega10_ih_sw_fini,
.hw_init = vega10_ih_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index ac439f0565e3..1c9aff742e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit to make sure the next write
+ * switches the bit from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -526,18 +553,18 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
}
-static int vega20_ih_early_init(void *handle)
+static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_set_interrupt_funcs(adev);
vega20_ih_set_self_irq_funcs(adev);
return 0;
}
-static int vega20_ih_sw_init(void *handle)
+static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr = true;
int r;
@@ -586,19 +613,19 @@ static int vega20_ih_sw_init(void *handle)
return r;
}
-static int vega20_ih_sw_fini(void *handle)
+static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int vega20_ih_hw_init(void *handle)
+static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = vega20_ih_irq_init(adev);
if (r)
@@ -607,27 +634,21 @@ static int vega20_ih_hw_init(void *handle)
return 0;
}
-static int vega20_ih_hw_fini(void *handle)
+static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega20_ih_irq_disable(adev);
+ vega20_ih_irq_disable(ip_block->adev);
return 0;
}
-static int vega20_ih_suspend(void *handle)
+static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_fini(adev);
+ return vega20_ih_hw_fini(ip_block);
}
-static int vega20_ih_resume(void *handle)
+static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_init(adev);
+ return vega20_ih_hw_init(ip_block);
}
static bool vega20_ih_is_idle(void *handle)
@@ -636,13 +657,13 @@ static bool vega20_ih_is_idle(void *handle)
return true;
}
-static int vega20_ih_wait_for_idle(void *handle)
+static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int vega20_ih_soft_reset(void *handle)
+static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -696,7 +717,6 @@ static int vega20_ih_set_powergating_state(void *handle,
const struct amd_ip_funcs vega20_ih_ip_funcs = {
.name = "vega20_ih",
.early_init = vega20_ih_early_init,
- .late_init = NULL,
.sw_init = vega20_ih_sw_init,
.sw_fini = vega20_ih_sw_fini,
.hw_init = vega20_ih_hw_init,
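The WPTR_OVERFLOW_CLEAR handling added at the top of this file writes the field 0, then 1, then 0, committing each value to the register (or via psp_reg_program under SR-IOV) so that the middle write is a guaranteed 0 -> 1 edge that latches the overflow clear, and the final write re-arms detection for future overflows. A self-contained user-space sketch of just the read-modify-write sequence; the mask and shift are placeholders, not the real IH_RB_CNTL layout:

#include <stdint.h>
#include <stdio.h>

#define OVERFLOW_CLEAR_SHIFT 31u              /* placeholder field position */
#define OVERFLOW_CLEAR_MASK  (1u << OVERFLOW_CLEAR_SHIFT)

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t tmp = 0xffffffffu;           /* pretend IH_RB_CNTL readback */

        /* 1. force 0 so the next write is a genuine 0 -> 1 transition */
        tmp = set_field(tmp, OVERFLOW_CLEAR_MASK, OVERFLOW_CLEAR_SHIFT, 0);
        printf("armed:   0x%08x\n", tmp);
        /* 2. the rising edge latches the overflow clear in hardware */
        tmp = set_field(tmp, OVERFLOW_CLEAR_MASK, OVERFLOW_CLEAR_SHIFT, 1);
        printf("cleared: 0x%08x\n", tmp);
        /* 3. back to 0 so new overflows remain detectable */
        tmp = set_field(tmp, OVERFLOW_CLEAR_MASK, OVERFLOW_CLEAR_SHIFT, 0);
        printf("rearmed: 0x%08x\n", tmp);
        return 0;
}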
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d39c670f6220..b3fa54c0514e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1455,9 +1455,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
#define CZ_REV_BRISTOL(rev) \
((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
-static int vi_common_early_init(void *handle)
+static int vi_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU) {
adev->smc_rreg = &cz_smc_rreg;
@@ -1679,9 +1679,9 @@ static int vi_common_early_init(void *handle)
return 0;
}
-static int vi_common_late_init(void *handle)
+static int vi_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_vi_mailbox_get_irq(adev);
@@ -1689,9 +1689,9 @@ static int vi_common_late_init(void *handle)
return 0;
}
-static int vi_common_sw_init(void *handle)
+static int vi_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_vi_mailbox_add_irq_id(adev);
@@ -1699,14 +1699,9 @@ static int vi_common_sw_init(void *handle)
return 0;
}
-static int vi_common_sw_fini(void *handle)
+static int vi_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int vi_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* move the golden regs per IP block */
vi_init_golden_registers(adev);
@@ -1718,9 +1713,9 @@ static int vi_common_hw_init(void *handle)
return 0;
}
-static int vi_common_hw_fini(void *handle)
+static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable the doorbell aperture */
vi_enable_doorbell_aperture(adev, false);
@@ -1731,18 +1726,14 @@ static int vi_common_hw_fini(void *handle)
return 0;
}
-static int vi_common_suspend(void *handle)
+static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vi_common_hw_fini(adev);
+ return vi_common_hw_fini(ip_block);
}
-static int vi_common_resume(void *handle)
+static int vi_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vi_common_hw_init(adev);
+ return vi_common_hw_init(ip_block);
}
static bool vi_common_is_idle(void *handle)
@@ -1750,16 +1741,6 @@ static bool vi_common_is_idle(void *handle)
return true;
}
-static int vi_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int vi_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
@@ -2047,19 +2028,14 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
.early_init = vi_common_early_init,
.late_init = vi_common_late_init,
.sw_init = vi_common_sw_init,
- .sw_fini = vi_common_sw_fini,
.hw_init = vi_common_hw_init,
.hw_fini = vi_common_hw_fini,
.suspend = vi_common_suspend,
.resume = vi_common_resume,
.is_idle = vi_common_is_idle,
- .wait_for_idle = vi_common_wait_for_idle,
- .soft_reset = vi_common_soft_reset,
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
.get_clockgating_state = vi_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
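vi.c goes a step further than the VCN files: besides the explicit NULL entries, it deletes do-nothing stubs (vi_common_sw_fini, vi_common_wait_for_idle, vi_common_soft_reset) outright. That only works because the IP dispatch treats an unset hook as optional. A minimal sketch of that dispatch convention (simplified; not the real amdgpu scheduler code):

#include <stdio.h>

struct ip_funcs {
        const char *name;
        int (*sw_fini)(void);                 /* optional: may stay NULL */
};

static int dispatch_sw_fini(const struct ip_funcs *f)
{
        if (!f->sw_fini)                      /* unset hook: nothing to do */
                return 0;
        return f->sw_fini();
}

int main(void)
{
        /* Designated initializers leave every unnamed member NULL. */
        static const struct ip_funcs vi_common = { .name = "vi_common" };

        return dispatch_sw_fini(&vi_common);
}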
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 9044bdb38cf4..065d87841459 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -365,7 +365,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+ err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
NULL, NULL, NULL, &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+ atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+ atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}
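The vram_usage conversions above replace a plain read-modify-write (WRITE_ONCE(x, x + size)) with atomic64_add()/atomic64_sub(). The old form could lose updates: two threads accounting allocations concurrently may both read the same old value, and one increment vanishes. A reduced user-space analogue using C11 atomics in place of the kernel's atomic64_t (names illustrative):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t vram_usage;           /* stands in for pdd->vram_usage */

/* Racy form the patch removes:
 *         vram_usage = vram_usage + size;
 * Both concurrent callers can read the same old value; one add is lost. */

static void account_alloc(uint64_t size)
{
        atomic_fetch_add_explicit(&vram_usage, size, memory_order_relaxed);
}

static void account_free(uint64_t size)
{
        atomic_fetch_sub_explicit(&vram_usage, size, memory_order_relaxed);
}

int main(void)
{
        account_alloc(4096);
        account_free(4096);
        return (int)atomic_load(&vram_usage);  /* 0: adds and subs balance */
}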
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 48caecf7e72e..723f1220e1cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -28,6 +28,7 @@
#include "kfd_topology.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_xgmi.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
* GPU processor ID are expressed with Bit[31]=1.
@@ -2329,6 +2330,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
continue;
if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
continue;
+ if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev))
+ continue;
sub_type_hdr = (typeof(sub_type_hdr))(
(char *)sub_type_hdr +
sizeof(struct crat_subtype_iolink));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 648f40091aa3..38c19dc8311d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2048,7 +2048,7 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
struct device *dev = dqm->dev->adev->dev;
- uint64_t *fence_addr = dqm->fence_addr;
+ uint64_t *fence_addr = dqm->fence_addr;
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
@@ -2254,6 +2254,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
+ mb();
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
@@ -3173,7 +3174,7 @@ struct copy_context_work_handler_workarea {
struct kfd_process *p;
};
-static void copy_context_work_handler (struct work_struct *work)
+static void copy_context_work_handler(struct work_struct *work)
{
struct copy_context_work_handler_workarea *workarea;
struct mqd_manager *mqd_mgr;
@@ -3200,6 +3201,9 @@ static void copy_context_work_handler (struct work_struct *work)
struct qcm_process_device *qpd = &pdd->qpd;
list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE)
+ continue;
+
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
/* We ignore the return value from get_wave_state
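Two fixes land in this file: copy_context_work_handler() now skips non-compute queues (SDMA queues have no CP wave state to copy), and a full memory barrier follows the *dqm->fence_addr = KFD_FENCE_INIT store so the init value is visible before pm_send_query_status() asks the firmware to overwrite the same word with KFD_FENCE_COMPLETED. A rough user-space analogue of that ordering, using a C11 fence; the kernel's mb() is stronger and also orders against device DMA, which this sketch cannot model:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t fence_word;           /* shared with the "device" */

static void send_query_status(void)
{
        /* ... ring a doorbell, submit the packet ... */
}

static void arm_and_query(uint64_t init_value)
{
        atomic_store_explicit(&fence_word, init_value, memory_order_relaxed);
        /* Full fence: the init store must be visible before the consumer
         * is told it may start writing COMPLETED into the same word. */
        atomic_thread_fence(memory_order_seq_cst);
        send_query_status();
}

int main(void)
{
        arm_and_query(0);
        return 0;
}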
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 8ee3d07ffbdf..eacfeb32f35d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -445,14 +445,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, node->id, trigger);
-
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ 0, node->id, trigger, r);
out:
if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
@@ -751,14 +750,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- node->id, 0, trigger);
-
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ node->id, 0, trigger, r);
out:
if (!r && cpages) {
mpages = cpages - upages;
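The kfd_smi_event_migration_end() calls move below the out_free label and gain an error_code argument, so listeners get an end event, including the failure reason, even when the migration bails out early, not only on the success path. A reduced sketch of that goto-cleanup shape (helper names illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void emit_migration_end(int error_code)
{
        printf("MIGRATE_END error_code=%d\n", error_code);
}

static int migrate_range(int fail)
{
        void *buf = malloc(64);
        int r = 0;

        if (!buf)
                return -ENOMEM;               /* fails before event state exists */
        if (fail) {
                r = -EFAULT;                  /* the copy went wrong */
                goto out_free;
        }
        /* ... successful copy path ... */
out_free:
        free(buf);
        emit_migration_end(r);                /* fires on every later outcome */
        return r;
}

int main(void)
{
        migrate_range(0);
        return migrate_range(1) == -EFAULT ? 0 : 1;
}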
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d6530febabad..9e5ca0b93b2a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -775,7 +775,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
- uint64_t vram_usage;
+ atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];
@@ -1347,7 +1347,6 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_node *dev,
- struct file *f,
struct queue_properties *properties,
unsigned int *qid,
const struct kfd_criu_queue_priv_data *q_data,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d07acf1b2f93..d4aa843aacfd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -332,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
- return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
@@ -1625,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
- pdd->vram_usage = 0;
+ atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
@@ -1702,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &ef);
+ p->ef ? NULL : &ef);
if (ret) {
dev_err(dev->adev->dev, "Failed to create process VM object\n");
return ret;
}
- RCU_INIT_POINTER(p->ef, ef);
+
+ if (!p->ef)
+ RCU_INIT_POINTER(p->ef, ef);
+
pdd->drm_priv = drm_file->private_data;
ret = kfd_process_device_reserve_ib_mem(pdd);
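The acquire-VM change above passes NULL for the fence out-parameter whenever p->ef is already set, so initializing a second GPU for the same process reuses the existing eviction fence instead of creating a new one and clobbering the published pointer. A reduced sketch of the shape, with stubs standing in for the real helpers:

#include <stddef.h>

struct fence { int refcount; };
struct process { struct fence *ef; };

/* Stub: only materializes a fence when the caller asks for one. */
static int acquire_process_vm(struct fence **ef_out)
{
        static struct fence f = { .refcount = 1 };

        if (ef_out)
                *ef_out = &f;
        return 0;
}

static int init_vm_for_device(struct process *p)
{
        struct fence *ef = NULL;
        int ret = acquire_process_vm(p->ef ? NULL : &ef);

        if (ret)
                return ret;
        if (!p->ef)
                p->ef = ef;                   /* only the first device publishes */
        return 0;
}

int main(void)
{
        struct process p = { .ef = NULL };

        init_vm_for_device(&p);               /* creates and publishes the fence */
        init_vm_for_device(&p);               /* reuses p.ef; no replacement */
        return p.ef ? 0 : 1;
}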
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 01b960b15274..c76db22a1000 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -235,7 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
- struct file *f, unsigned int qid)
+ unsigned int qid)
{
int retval;
@@ -300,7 +300,6 @@ cleanup:
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_node *dev,
- struct file *f,
struct queue_properties *properties,
unsigned int *qid,
const struct kfd_criu_queue_priv_data *q_data,
@@ -374,7 +373,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* allocate_sdma_queue() in create_queue() has the
* corresponding check logic.
*/
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -395,7 +394,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -1029,8 +1028,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
print_queue_properties(&qp);
- ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
- NULL);
+ ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
goto exit;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index de8b9abf7afc..9b8169761ec5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -44,7 +44,7 @@ struct kfd_smi_client {
bool suser;
};
-#define MAX_KFIFO_SIZE 1024
+#define KFD_MAX_KFIFO_SIZE 8192
static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
@@ -86,7 +86,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
struct kfd_smi_client *client = filep->private_data;
unsigned char *buf;
- size = min_t(size_t, size, MAX_KFIFO_SIZE);
+ size = min_t(size_t, size, KFD_MAX_KFIFO_SIZE);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -292,12 +292,13 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
- uint32_t from, uint32_t to, uint32_t trigger)
+ uint32_t from, uint32_t to, uint32_t trigger,
+ int error_code)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
KFD_EVENT_FMT_MIGRATE_END(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, trigger));
+ from, to, trigger, error_code));
}
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
@@ -354,7 +355,7 @@ int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
return -ENOMEM;
INIT_LIST_HEAD(&client->list);
- ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
+ ret = kfifo_alloc(&client->fifo, KFD_MAX_KFIFO_SIZE, GFP_KERNEL);
if (ret) {
kfree(client);
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 85010b8307f8..503bff13d815 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -44,7 +44,8 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
uint32_t trigger);
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
- uint32_t from, uint32_t to, uint32_t trigger);
+ uint32_t from, uint32_t to, uint32_t trigger,
+ int error_code);
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger);
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 04e746923697..3e2911895c74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
+
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ struct mm_struct *mm;
+
+ mm = svm_bo->eviction_fence->mm;
+ /*
+ * A forked child process takes a reference on the svm_bo device pages,
+ * so the svm_bo may be released after the parent process is gone.
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (p) {
+ pdd = kfd_get_process_device_data(svm_bo->node, p);
+ if (pdd)
+ atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
+ kfd_unref_process(p);
+ }
+ mmput(mm);
+ }
+
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
@@ -532,6 +553,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
+ struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
+ pdd = svm_range_get_pdd_by_node(prange, node);
+ if (pdd)
+ atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
+
return 0;
reserve_bo_failed:
@@ -3085,8 +3111,6 @@ retry_write_locked:
start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
if (prange->actual_loc != 0 || best_loc != 0) {
- migration = true;
-
if (best_loc) {
r = svm_migrate_to_vram(prange, best_loc, start, last,
mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
@@ -3109,7 +3133,9 @@ retry_write_locked:
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
r, svms, start, last);
- goto out_unlock_range;
+ goto out_migrate_fail;
+ } else {
+ migration = true;
}
}
@@ -3119,6 +3145,7 @@ retry_write_locked:
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, start, last);
+out_migrate_fail:
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
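The kfd_svm.c hunks tie pdd->vram_usage to the SVM BO's lifetime: svm_range_vram_node_new() adds amdgpu_bo_size(bo) at allocation, and svm_range_bo_release() subtracts it again, resolving the owning process through the eviction fence's mm_struct because a forked child holding device-page references can be the last to drop the BO after the parent has exited. The mmget_not_zero()/mmput() pair keeps that mm alive across the lookup. A reduced sketch of the guarded-lookup pattern (types and helpers illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct mm_struct { int users; };

/* Stubs standing in for mmget_not_zero()/mmput(). */
static bool mm_get_not_zero(struct mm_struct *mm)
{
        if (mm->users == 0)                   /* owner already fully gone */
                return false;
        mm->users++;
        return true;
}

static void mm_put(struct mm_struct *mm) { mm->users--; }

static _Atomic uint64_t vram_usage;

static void bo_release(struct mm_struct *owner_mm, uint64_t bo_size)
{
        if (!mm_get_not_zero(owner_mm))       /* owner exited; nothing to undo */
                return;
        atomic_fetch_sub(&vram_usage, bo_size);   /* undo the alloc-time add */
        mm_put(owner_mm);
}

int main(void)
{
        struct mm_struct mm = { .users = 1 };

        atomic_fetch_add(&vram_usage, 4096);  /* alloc-time accounting */
        bo_release(&mm, 4096);
        return (int)atomic_load(&vram_usage); /* 0 */
}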
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 60c617fcc97e..75d6b90104f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -320,18 +320,18 @@ static bool dm_is_idle(void *handle)
return true;
}
-static int dm_wait_for_idle(void *handle)
+static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return 0;
}
-static bool dm_check_soft_reset(void *handle)
+static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
return false;
}
-static int dm_soft_reset(void *handle)
+static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return 0;
@@ -968,7 +968,7 @@ static int dm_set_powergating_state(void *handle,
}
/* Prototypes of private functions */
-static int dm_early_init(void *handle);
+static int dm_early_init(struct amdgpu_ip_block *ip_block);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1696,6 +1696,26 @@ dm_allocate_gpu_mem(
return da->cpu_ptr;
}
+void
+dm_free_gpu_mem(
+ struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem)
+{
+ struct dal_allocation *da;
+
+ /* walk the da list in DM */
+ list_for_each_entry(da, &adev->dm.da_list, list) {
+ if (pvMem == da->cpu_ptr) {
+ amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+ list_del(&da->list);
+ kfree(da);
+ break;
+ }
+ }
+
+}
+
static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
enum dmub_gpint_command command_code,
@@ -1762,16 +1782,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
/* Send the chunk */
ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
if (ret != DMUB_STATUS_OK)
- /* No need to free bb here since it shall be done in dm_sw_fini() */
- return NULL;
+ goto free_bb;
}
/* Now ask DMUB to copy the bb */
ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
if (ret != DMUB_STATUS_OK)
- return NULL;
+ goto free_bb;
return bb;
+
+free_bb:
+ dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
+ return NULL;
+
}
static enum dmub_ips_disable_type dm_get_default_ips_mode(
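The hunk above converts the early returns in dm_dmub_get_vbios_bounding_box() into a single goto free_bb exit, so the bounding-box buffer allocated earlier cannot leak when a GPINT command fails. A compact userspace sketch of the same goto-unwind idiom, under illustrative names (send_chunk stands in for the GPINT call):

#include <stdlib.h>
#include <string.h>

static int send_chunk(size_t i)
{
    return i < 4 ? 0 : -1;  /* illustrative: fails from chunk 4 on */
}

static char *build_blob(size_t nchunks)
{
    char *blob = malloc(nchunks);
    size_t i;

    if (!blob)
        return NULL;
    memset(blob, 0, nchunks);

    for (i = 0; i < nchunks; i++)
        if (send_chunk(i))
            goto free_blob;  /* every failure funnels into one cleanup path */

    return blob;             /* success: caller owns the buffer */

free_blob:
    free(blob);
    return NULL;
}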
@@ -1886,7 +1910,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else
init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
} else {
- init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
+ else
+ init_data.flags.gpu_vm_support =
+ (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
}
adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
@@ -2115,9 +2143,9 @@ error:
return -EINVAL;
}
-static int amdgpu_dm_early_fini(void *handle)
+static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_dm_audio_fini(adev);
@@ -2509,9 +2537,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
-static int dm_sw_init(void *handle)
+static int dm_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
@@ -2531,9 +2559,9 @@ static int dm_sw_init(void *handle)
return load_dmcu_fw(adev);
}
-static int dm_sw_fini(void *handle)
+static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct dal_allocation *da;
list_for_each_entry(da, &adev->dm.da_list, list) {
@@ -2541,11 +2569,11 @@ static int dm_sw_fini(void *handle)
amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
list_del(&da->list);
kfree(da);
+ adev->dm.bb_from_dmub = NULL;
break;
}
}
- adev->dm.bb_from_dmub = NULL;
kfree(adev->dm.dmub_fb_info);
adev->dm.dmub_fb_info = NULL;
@@ -2598,9 +2626,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
return ret;
}
-static int dm_late_init(void *handle)
+static int dm_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
@@ -2790,7 +2818,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -2808,9 +2836,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
* - Vblank support
* - Debug FS entries, if enabled
*/
-static int dm_hw_init(void *handle)
+static int dm_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* Create DAL display manager */
@@ -2824,15 +2852,15 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
* were loaded. Also flush IRQ workqueues and disable them.
*/
-static int dm_hw_fini(void *handle)
+static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_dm_hpd_fini(adev);
@@ -2936,9 +2964,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
-static int dm_suspend(void *handle)
+static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
@@ -2972,10 +3000,11 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
- if (adev->dm.dc->caps.ips_support)
- dc_allow_idle_optimizations(adev->dm.dc, true);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dm->dc->caps.ips_support && adev->in_s0ix)
+ dc_allow_idle_optimizations(dm->dc, true);
+
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -3124,9 +3153,9 @@ cleanup:
kfree(bundle);
}
-static int dm_resume(void *handle)
+static int dm_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct drm_device *ddev = adev_to_drm(adev);
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
@@ -3378,8 +3407,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version dm_ip_block = {
@@ -3494,7 +3521,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
- aconnector->edid);
+ aconnector->drm_edid);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
if (!aconnector->dc_sink) {
@@ -3553,18 +3580,19 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
- aconnector->edid = NULL;
+ aconnector->drm_edid = NULL;
if (aconnector->dc_link->aux_mode) {
- drm_dp_cec_unset_edid(
- &aconnector->dm_dp_aux.aux);
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
} else {
- aconnector->edid =
- (struct edid *)sink->dc_edid.raw_edid;
+ const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid;
+
+ aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
+ drm_edid_connector_update(connector, aconnector->drm_edid);
if (aconnector->dc_link->aux_mode)
- drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
- aconnector->edid);
+ drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
+ connector->display_info.source_physical_address);
}
if (!aconnector->timing_requested) {
@@ -3575,17 +3603,16 @@ void amdgpu_dm_update_connector_after_detect(
"failed to create aconnector->requested_timing\n");
}
- drm_connector_update_edid_property(connector, aconnector->edid);
- amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
- drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
+ drm_edid_free(aconnector->drm_edid);
+ aconnector->drm_edid = NULL;
kfree(aconnector->timing_requested);
aconnector->timing_requested = NULL;
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
@@ -5176,15 +5203,20 @@ static ssize_t s3_debug_store(struct device *device,
int s3_state;
struct drm_device *drm_dev = dev_get_drvdata(device);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_ip_block *ip_block;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
+ if (!ip_block)
+ return -EINVAL;
ret = kstrtoint(buf, 0, &s3_state);
if (ret == 0) {
if (s3_state) {
- dm_resume(adev);
+ dm_resume(ip_block);
drm_kms_helper_hotplug_event(adev_to_drm(adev));
} else
- dm_suspend(adev);
+ dm_suspend(ip_block);
}
return ret == 0 ? count : 0;
@@ -5256,9 +5288,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
return r;
}
-static int dm_early_init(void *handle)
+static int dm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
@@ -7121,32 +7153,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
- struct edid *edid;
- struct i2c_adapter *ddc;
-
- if (dc_link && dc_link->aux_mode)
- ddc = &aconnector->dm_dp_aux.aux.ddc;
- else
- ddc = &aconnector->i2c->base;
+ const struct drm_edid *drm_edid;
- /*
- * Note: drm_get_edid gets edid in the following order:
- * 1) override EDID if set via edid_override debugfs,
- * 2) firmware EDID if set via edid_firmware module parameter
- * 3) regular DDC read.
- */
- edid = drm_get_edid(connector, ddc);
- if (!edid) {
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
}
- aconnector->edid = edid;
-
+ aconnector->drm_edid = drm_edid;
/* Update emulated (virtual) sink's EDID */
if (dc_em_sink && dc_link) {
+ // FIXME: Get rid of drm_edid_raw()
+ const struct edid *edid = drm_edid_raw(drm_edid);
+
memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
- memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ memmove(dc_em_sink->dc_edid.raw_edid, edid,
+ (edid->extensions + 1) * EDID_LENGTH);
dm_helpers_parse_edid_caps(
dc_link,
&dc_em_sink->dc_edid,
@@ -7176,36 +7200,26 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
- struct edid *edid;
- struct i2c_adapter *ddc;
-
- if (dc_link->aux_mode)
- ddc = &aconnector->dm_dp_aux.aux.ddc;
- else
- ddc = &aconnector->i2c->base;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
- /*
- * Note: drm_get_edid gets edid in the following order:
- * 1) override EDID if set via edid_override debugfs,
- * 2) firmware EDID if set via edid_firmware module parameter
- * 3) regular DDC read.
- */
- edid = drm_get_edid(connector, ddc);
- if (!edid) {
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
}
- if (drm_detect_hdmi_monitor(edid))
+ if (connector->display_info.is_hdmi)
init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
- aconnector->edid = edid;
+ aconnector->drm_edid = drm_edid;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
aconnector->dc_em_sink = dc_link_add_remote_sink(
aconnector->dc_link,
(uint8_t *)edid,
@@ -7892,16 +7906,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (edid) {
+ if (drm_edid) {
/* empty probed_modes */
INIT_LIST_HEAD(&connector->probed_modes);
amdgpu_dm_connector->num_modes =
- drm_add_edid_modes(connector, edid);
+ drm_edid_connector_add_modes(connector);
/* sorting the probed modes before calling function
* amdgpu_dm_get_native_mode() since EDID can have
@@ -7915,10 +7929,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
amdgpu_dm_get_native_mode(connector);
/* Freesync capabilities are reset by calling
- * drm_add_edid_modes() and need to be
+ * drm_edid_connector_add_modes() and need to be
* restored here.
*/
- amdgpu_dm_update_freesync_caps(connector, edid);
+ amdgpu_dm_update_freesync_caps(connector, drm_edid);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -8014,12 +8028,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
}
static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!(amdgpu_freesync_vid_mode && edid))
+ if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8032,24 +8046,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
- struct edid *edid = amdgpu_dm_connector->edid;
+ const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
struct dc_link_settings *verified_link_cap =
&amdgpu_dm_connector->dc_link->verified_link_cap;
const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
- if (!drm_edid_is_valid(edid)) {
+ if (!drm_edid) {
amdgpu_dm_connector->num_modes =
drm_add_modes_noedid(connector, 640, 480);
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
} else {
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
amdgpu_dm_connector_add_common_modes(encoder, connector);
- amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8373,7 +8387,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
+ DC_PSR_VERSION_UNSUPPORTED ||
+ !(adev->flags & AMD_IS_APU)) {
timing = &acrtc_state->stream->timing;
/* at least 2 frames */
@@ -9573,7 +9588,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
+ if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
dc_allow_idle_optimizations(dm->dc, true);
mutex_unlock(&dm->dc_lock);
@@ -10104,6 +10119,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
+
+ trace_amdgpu_dm_atomic_commit_tail_finish(state);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -12004,7 +12021,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
}
static void parse_edid_displayid_vrr(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@@ -12047,7 +12064,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
}
static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
- struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
u8 *edid_ext = NULL;
int i;
@@ -12082,7 +12099,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
}
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
- struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ const struct edid *edid,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
u8 *edid_ext = NULL;
int i;
@@ -12116,7 +12134,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
* amdgpu_dm_update_freesync_caps - Update Freesync capabilities
*
* @connector: Connector to query.
- * @edid: EDID from monitor
+ * @drm_edid: DRM EDID from monitor
*
* Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
* track of some of the display information in the internal data struct used by
@@ -12124,19 +12142,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
* FreeSync parameters.
*/
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
int i = 0;
- struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL;
struct dc_sink *sink;
-
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ const struct edid *edid;
bool freesync_capable = false;
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
@@ -12149,13 +12164,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->dc_sink :
amdgpu_dm_connector->dc_em_sink;
- if (!edid || !sink) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
- connector->display_info.monitor_range.min_vfreq = 0;
- connector->display_info.monitor_range.max_vfreq = 0;
freesync_capable = false;
goto update;
@@ -12166,6 +12181,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+
/* Some eDP panels only have the refresh rate range info in DisplayID */
if ((connector->display_info.monitor_range.min_vfreq == 0 ||
connector->display_info.monitor_range.max_vfreq == 0))
@@ -12173,67 +12190,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- bool edid_check_required = false;
-
- if (amdgpu_dm_connector->dc_link &&
- amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
- if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq -
- amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
- } else {
- edid_check_required = edid->version > 1 ||
- (edid->version == 1 &&
- edid->revision > 1);
- }
- }
-
- if (edid_check_required) {
- for (i = 0; i < 4; i++) {
-
- timing = &edid->detailed_timings[i];
- data = &timing->data.other_data;
- range = &data->data.range;
- /*
- * Check if monitor has continuous frequency mode
- */
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
- continue;
- /*
- * Check for flag range limits only. If flag == 1 then
- * no additional timing information provided.
- * Default GTF, GTF Secondary curve and CVT are not
- * supported
- */
- if (range->flags != 1)
- continue;
-
- connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
- connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
-
- if (edid->revision >= 4) {
- if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
- connector->display_info.monitor_range.min_vfreq += 255;
- if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
- connector->display_info.monitor_range.max_vfreq += 255;
- }
-
- amdgpu_dm_connector->min_vfreq =
- connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq =
- connector->display_info.monitor_range.max_vfreq;
-
- break;
- }
-
- if (amdgpu_dm_connector->max_vfreq -
- amdgpu_dm_connector->min_vfreq > 10) {
-
- freesync_capable = true;
- }
- }
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
@@ -12242,12 +12202,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
}
- } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
- timing = &edid->detailed_timings[i];
- data = &timing->data.other_data;
-
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
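The amdgpu_dm.c hunks above migrate from the raw struct edid helpers (drm_get_edid(), drm_add_edid_modes(), drm_connector_update_edid_property()) to the opaque struct drm_edid API, which keeps the EDID property and connector->display_info in sync through drm_edid_connector_update(). A condensed sketch of the canonical flow; this illustrates the upstream API shape, not a drop-in driver function:

#include <drm/drm_edid.h>

/* a minimal sketch, assuming the connector's DDC adapter is already set up */
static int example_get_modes(struct drm_connector *connector)
{
    const struct drm_edid *drm_edid;
    int num_modes;

    drm_edid = drm_edid_read(connector);             /* override/firmware/DDC order */
    drm_edid_connector_update(connector, drm_edid);  /* parses and syncs EDID property */
    if (!drm_edid)
        return 0;

    num_modes = drm_edid_connector_add_modes(connector);

    drm_edid_free(drm_edid);  /* this sketch keeps no pointer; amdgpu caches drm_edid instead */
    return num_modes;
}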
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 15d4690c74d6..25e95775c45c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -673,7 +673,7 @@ struct amdgpu_dm_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
- struct edid *edid;
+ const struct drm_edid *drm_edid;
/* shared with amdgpu */
struct amdgpu_hpd hpd;
@@ -951,7 +951,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid);
+ const struct drm_edid *drm_edid);
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
@@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
enum dc_gpu_mem_alloc_type type,
size_t size,
long long *addr);
+void dm_free_gpu_mem(struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ void *addr);
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index a2cf2c066a76..ffa4d3965b4b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -36,7 +36,7 @@
#include "amdgpu_dm_debugfs.h"
#define HPD_DETECTION_PERIOD_uS 5000000
-#define HPD_DETECTION_TIME_uS 1000
+#define HPD_DETECTION_TIME_uS 100000
void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
@@ -154,6 +154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_psr_enable(vblank_work->stream);
if (dm->idle_workqueue &&
+ (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) &&
dm->dc->idle_optimizations_allowed &&
dm->idle_workqueue->enable &&
!dm->idle_workqueue->running)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index db56b0aa5454..6a97bb2d9160 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1529,7 +1529,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1543,8 +1542,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1558,10 +1555,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_clock_en);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -1719,7 +1715,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1733,8 +1728,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1748,10 +1741,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_width);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -1907,7 +1899,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1921,8 +1912,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1936,10 +1925,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_height);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2091,7 +2079,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2105,8 +2092,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2120,10 +2105,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_bits_per_pixel);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2270,7 +2254,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2284,8 +2267,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2299,10 +2280,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_pic_width);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2328,7 +2308,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2342,8 +2321,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2357,10 +2334,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_pic_height);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2401,7 +2377,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2415,8 +2390,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2430,10 +2403,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_chunk_size);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2474,7 +2446,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2488,8 +2459,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2503,10 +2472,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_bpg_offset);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
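The amdgpu_dm_debugfs.c hunks above remove rd_buf_ptr, a write cursor that was advanced once after a single snprintf() and never used again. Each of these reads boils down to formatting one integer and copying it out at *pos; a hedged sketch of the same single-value debugfs read using the stock helper instead of the open-coded copy loop (function name and the value 42 are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t one_value_read(struct file *f, char __user *buf,
                              size_t size, loff_t *pos)
{
    char tmp[16];
    int len = scnprintf(tmp, sizeof(tmp), "%d\n", 42 /* queried DSC field */);

    return simple_read_from_buffer(buf, size, pos, tmp, len);
}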
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 069e0195e50a..b0fea0856866 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -23,6 +23,8 @@
*
*/
+#include <acpi/video.h>
+
#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
@@ -44,6 +46,7 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+#include "clk_mgr.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
@@ -641,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
// write rc data
memmove(rc_data, data, length);
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
+ if (ret < 0)
+ goto err;
}
// write rc offset
@@ -649,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
+ if (ret < 0)
+ goto err;
// write rc length
rc_length[0] = (unsigned char) length & 0xFF;
rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
+ if (ret < 0)
+ goto err;
// write rc cmd
rc_cmd = cmd | 0x80;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
-
- if (ret < 0) {
- DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
- return false;
- }
+ if (ret < 0)
+ goto err;
// poll until active is 0
for (i = 0; i < 10; i++) {
@@ -685,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
drm_dbg_dp(aux->drm_dev, "success = %d\n", success);
return success;
+
+err:
+ DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
+ return false;
}
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
@@ -891,6 +901,60 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
return dp_sink_present;
}
+static int
+dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_connector *connector = data;
+ struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
+ unsigned char start = block * EDID_LENGTH;
+ void *edid;
+ int r;
+
+ if (!acpidev)
+ return -ENODEV;
+
+ /* fetch the entire edid from BIOS */
+ r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
+ if (r < 0) {
+ drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
+ return r;
+ }
+ if (len > r || start > r || start + len > r) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ memcpy(buf, edid + start, len);
+ r = 0;
+
+cleanup:
+ kfree(edid);
+
+ return r;
+}
+
+static const struct drm_edid *
+dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID)
+ return NULL;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ break;
+ default:
+ return NULL;
+ }
+
+ if (connector->force == DRM_FORCE_OFF)
+ return NULL;
+
+ return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector);
+}
+
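dm_helpers_probe_acpi_edid() plugs into drm_edid_read_custom(), which invokes the callback once per 128-byte EDID block; the callback must fill len bytes starting at block * EDID_LENGTH and return 0, or a negative errno. A minimal sketch of that callback contract backed by an in-memory blob standing in for the ACPI-provided EDID (struct edid_blob and blob_read_block are illustrative):

#include <drm/drm_edid.h>
#include <linux/errno.h>
#include <linux/string.h>

struct edid_blob {            /* illustrative stand-in for the ACPI buffer */
    const u8 *data;
    size_t size;
};

static int blob_read_block(void *context, u8 *buf, unsigned int block, size_t len)
{
    struct edid_blob *b = context;
    size_t start = block * EDID_LENGTH;

    if (start + len > b->size)  /* same bounds check as the ACPI path above */
        return -EINVAL;

    memcpy(buf, b->data + start, len);
    return 0;
}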
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -901,7 +965,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
if (link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -912,26 +977,31 @@ enum dc_edid_status dm_helpers_read_local_edid(
* do check sum and retry to make sure read correct edid.
*/
do {
-
- edid = drm_get_edid(&aconnector->base, ddc);
+ drm_edid = dm_helpers_read_acpi_edid(aconnector);
+ if (drm_edid)
+ drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
+ else
+ drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid_connector_update(connector, drm_edid);
/* DP Compliance Test 4.2.2.6 */
if (link->aux_mode && connector->edid_corrupt)
drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
- if (!edid && connector->edid_corrupt) {
+ if (!drm_edid && connector->edid_corrupt) {
connector->edid_corrupt = false;
return EDID_BAD_CHECKSUM;
}
- if (!edid)
+ if (!drm_edid)
return EDID_NO_RESPONSE;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
/* We don't need the original edid anymore */
- kfree(edid);
+ drm_edid_free(drm_edid);
edid_status = dm_helpers_parse_edid_caps(
link,
@@ -1054,17 +1124,8 @@ void dm_helpers_free_gpu_mem(
void *pvMem)
{
struct amdgpu_device *adev = ctx->driver_context;
- struct dal_allocation *da;
-
- /* walk the da list in DM */
- list_for_each_entry(da, &adev->dm.da_list, list) {
- if (pvMem == da->cpu_ptr) {
- amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
- list_del(&da->list);
- kfree(da);
- break;
- }
- }
+
+ dm_free_gpu_mem(adev, type, pvMem);
}
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
@@ -1121,6 +1182,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_device *dev = aconnector->base.dev;
+ struct dc_state *dc_state = ctx->dc->current_state;
+ struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1221,6 +1284,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
pipe_ctx->stream->test_pattern.type = test_pattern;
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+ /* Temporary workaround for a compliance test failure */
+ dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
+ dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ ctx->dc->clk_mgr->funcs->update_clocks(
+ ctx->dc->clk_mgr,
+ dc_state,
+ false);
+
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
@@ -1301,4 +1374,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream
{
// TODO
return false;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index a08e8a0b696c..6e4359490613 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
dc_sink_release(aconnector->dc_sink);
}
- kfree(aconnector->edid);
+ drm_edid_free(aconnector->drm_edid);
drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
@@ -182,7 +182,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
+ aconnector->drm_edid = NULL;
aconnector->dsc_aux = NULL;
port->passthrough_aux = NULL;
}
@@ -302,16 +302,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector)
return drm_add_edid_modes(connector, NULL);
- if (!aconnector->edid) {
- struct edid *edid;
+ if (!aconnector->drm_edid) {
+ const struct drm_edid *drm_edid;
- edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
+ drm_edid = drm_dp_mst_edid_read(connector,
+ &aconnector->mst_root->mst_mgr,
+ aconnector->mst_output_port);
- if (!edid) {
+ if (!drm_edid) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID, false);
- drm_connector_update_edid_property(
+ drm_edid_connector_update(
&aconnector->base,
NULL);
@@ -345,7 +347,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
- aconnector->edid = edid;
+ aconnector->drm_edid = drm_edid;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID, true);
}
@@ -360,10 +362,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ const struct edid *edid;
+
+ edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw()
dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
&init_params);
if (!dc_sink) {
@@ -405,7 +410,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
+ connector, aconnector->drm_edid);
#if defined(CONFIG_DRM_AMD_DC_FP)
if (!validate_dsc_caps_on_connector(aconnector))
@@ -419,10 +424,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
}
}
- drm_connector_update_edid_property(
- &aconnector->base, aconnector->edid);
+ drm_edid_connector_update(&aconnector->base, aconnector->drm_edid);
- ret = drm_add_edid_modes(connector, aconnector->edid);
+ ret = drm_edid_connector_add_modes(connector);
return ret;
}
@@ -500,7 +504,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
+ aconnector->drm_edid = NULL;
aconnector->dsc_aux = NULL;
port->passthrough_aux = NULL;
@@ -1120,6 +1124,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int i, k, ret;
bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1127,7 +1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return PTR_ERR(mst_state);
/* Set up params */
- DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
+ DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count);
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -1143,6 +1148,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!aconnector->mst_output_port)
continue;
+ new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
+
+ if (!new_conn_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n",
+ __func__, __LINE__, stream);
+ continue;
+ }
+
stream->timing.flags.DSC = 0;
params[count].timing = &stream->timing;
@@ -1175,6 +1188,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
count++;
}
+ DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count);
+
if (count == 0) {
ASSERT(0);
return 0;
@@ -1302,7 +1317,7 @@ static bool is_dsc_need_re_compute(
continue;
aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
- if (!aconnector || !aconnector->dsc_aux)
+ if (!aconnector)
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index adc710fe4a45..8d2cf95ae739 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -78,10 +78,3 @@ void bios_set_scratch_critical_state(
uint32_t critial_state = state ? 1 : 0;
REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state);
}
-
-uint32_t bios_get_vga_enabled_displays(
- struct dc_bios *bios)
-{
- return REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index e1b4a40a353d..ab162f2fe577 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -34,7 +34,6 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
-uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b46a3afe48ca..7d68006137a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -257,11 +257,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
int i;
-
for (i = 0; i < context->stream_count; ++i) {
const struct dc_stream_state *stream = context->streams[i];
const struct dc_link *link = stream->link;
- uint8_t lowest_dpia_index = 0, hr_index = 0;
+ uint8_t lowest_dpia_index = 0;
+ unsigned int hr_index = 0;
if (!link)
continue;
@@ -271,6 +271,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
continue;
hr_index = (link->link_index - lowest_dpia_index) / 2;
+ if (hr_index >= MAX_HOST_ROUTERS_NUM)
+ continue;
host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
&stream->timing, dc_link_get_highest_encoding_format(link));
}
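The added check in dcn35_notify_host_router_bw() is the usual clamp-before-index guard: derive the index, then skip anything outside the fixed-size table rather than writing past it. A tiny userspace reduction of the pattern (the constant and values are illustrative):

#include <stdio.h>

#define MAX_HOST_ROUTERS_NUM 4

int main(void)
{
    unsigned int bw[MAX_HOST_ROUTERS_NUM] = { 0 };
    unsigned int link_index = 11, lowest_dpia_index = 0;
    unsigned int hr_index = (link_index - lowest_dpia_index) / 2;

    if (hr_index >= MAX_HOST_ROUTERS_NUM) {  /* skip, don't corrupt memory */
        printf("hr %u out of range, skipped\n", hr_index);
        return 0;
    }

    bw[hr_index] += 1000;
    printf("hr %u -> %u kbps\n", hr_index, bw[hr_index]);
    return 0;
}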
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5c39390ecbd5..5a12fc75f97f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -621,8 +621,8 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
* dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
- * @enable: Enable CRC if true, disable otherwise.
* @crc_window: CRC window (x/y start/end) information
+ * @enable: Enable CRC if true, disable otherwise.
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
*
@@ -1157,6 +1157,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
+ get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else {
if (dc->ctx->dce_version < DCN_VERSION_2_0)
color_space_to_black_color(
@@ -1233,16 +1235,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
*/
if (is_phantom) {
if (tg->funcs->enable_crtc) {
- int main_pipe_width = 0, main_pipe_height = 0;
- struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
-
- if (old_paired_stream) {
- main_pipe_width = old_paired_stream->dst.width;
- main_pipe_height = old_paired_stream->dst.height;
- }
-
- if (dc->hwss.blank_phantom)
- dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
tg->funcs->enable_crtc(tg);
}
}
@@ -1437,6 +1431,7 @@ void dc_hardware_init(struct dc *dc)
detect_edp_presence(dc);
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
dc->hwss.init_hw(dc);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}
void dc_init_callbacks(struct dc *dc,
@@ -1876,6 +1871,41 @@ void dc_z10_save_init(struct dc *dc)
dc->hwss.z10_save_init(dc);
}
+/* Set a pipe unlock order based on the change in DET allocation and store it in dc scratch memory.
+ * This prevents over-allocation of DET during the unlock process,
+ * e.g. a 2-pipe config with different streams and a max of 20 DET segments:
+ * Before:                     After:
+ * - Pipe0: 10 DET segments    - Pipe0: 12 DET segments
+ * - Pipe1: 10 DET segments    - Pipe1:  8 DET segments
+ * If Pipe0 gets updated first, 22 DET segments will transiently be allocated.
+ */
+static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context)
+{
+ unsigned int i = 0;
+ struct pipe_ctx *pipe = NULL;
+ struct timing_generator *tg = NULL;
+
+ if (!dc->config.set_pipe_unlock_order)
+ return;
+
+ memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first));
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ continue;
+ }
+
+ if (resource_calculate_det_for_stream(context, pipe) <
+ resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) {
+ dc->scratch.pipes_to_unlock_first[i] = true;
+ }
+ }
+}
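Working the comment's example through: with a 20-segment budget, unlocking the growing pipe first transiently needs 12 + 10 = 22 segments, while unlocking the shrinking pipe first peaks at 18 before settling at 20. An illustrative reduction of that arithmetic:

#include <stdio.h>

int main(void)
{
    int p0_before = 10, p1_before = 10;  /* 20-segment budget fully in use */
    int p0_after  = 12, p1_after  = 8;

    /* growing pipe unlocks first: its new DET coexists with pipe1's old DET */
    printf("grow-first transient:   %d segments\n", p0_after + p1_before);  /* 22 > 20 */

    /* shrinking pipe unlocks first: segments are released before pipe0 grows */
    printf("shrink-first transient: %d segments\n", p0_before + p1_after);  /* 18 <= 20 */
    return 0;
}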
+
/**
* dc_commit_state_no_check - Apply context to the hardware
*
@@ -1974,6 +2004,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
}
+ determine_pipe_unlock_order(dc, context);
/* Program all planes within new context*/
if (dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
@@ -2156,6 +2187,14 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+
+ /*
+ * Only update the link-encoder-to-stream assignment after bandwidth validation has passed.
+ */
+ if (res == DC_OK && dc->res_pool->funcs->link_encs_assign)
+ dc->res_pool->funcs->link_encs_assign(
+ dc, context, context->streams, context->stream_count);
+
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -2477,41 +2516,35 @@ static enum surface_update_type get_scaling_info_update_type(
if (!u->scaling_info)
return UPDATE_TYPE_FAST;
- if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height
+ || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
|| u->scaling_info->scaling_quality.integer_scaling !=
- u->surface->scaling_quality.integer_scaling
- ) {
+ u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ /* Making src rect bigger requires a bandwidth change */
+ update_flags->bits.clock_change = 1;
+
if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- }
- if (u->scaling_info->src_rect.width != u->surface->src_rect.width
- || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
-
- update_flags->bits.scaling_change = 1;
- if (u->scaling_info->src_rect.width > u->surface->src_rect.width
- || u->scaling_info->src_rect.height > u->surface->src_rect.height)
- /* Making src rect bigger requires a bandwidth change */
- update_flags->bits.clock_change = 1;
+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+ /* Changing clip size of a large surface may result in MPC slice count change */
+ update_flags->bits.bandwidth_change = 1;
}
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
- (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
- u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
- /* Changing clip size of a large surface may result in MPC slice count change */
- update_flags->bits.bandwidth_change = 1;
-
- if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
- u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
- update_flags->bits.clip_size_change = 1;
-
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2520,13 +2553,13 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
update_flags->bits.position_change = 1;
+ /* process every update flag before returning */
if (update_flags->bits.clock_change
|| update_flags->bits.bandwidth_change
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.position_change ||
- update_flags->bits.clip_size_change)
+ if (update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
@@ -2617,7 +2650,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
elevate_update_type(&overall_type, type);
}
- if (update_flags->bits.lut_3d) {
+ if (update_flags->bits.lut_3d &&
+ u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -2637,6 +2671,29 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
return overall_type;
}
+/* The desktop plane may need to be flipped in cases where the MPO plane receives a flip
+ * but the desktop plane doesn't, while both planes are flip_immediate.
+ */
+static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count)
+{
+ bool has_flip_immediate_plane = false;
+ int i;
+
+ for (i = 0; i < surface_count; i++) {
+ if (updates[i].surface->flip_immediate) {
+ has_flip_immediate_plane = true;
+ break;
+ }
+ }
+
+ if (has_flip_immediate_plane && surface_count > 1) {
+ for (i = 0; i < surface_count; i++) {
+ if (updates[i].surface->flip_immediate)
+ updates[i].surface->update_flags.bits.addr_update = 1;
+ }
+ }
+}
+
static enum surface_update_type check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -2699,6 +2756,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->scaler_sharpener_update)
su_flags->bits.scaler_sharpener = 1;
+ if (stream_update->sharpening_required)
+ su_flags->bits.sharpening_required = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2870,10 +2930,20 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->func_shaper)
+ if (srf_update->cm2_params) {
+ surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
+ surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
+ surface->mcm_luts = srf_update->cm2_params->cm2_luts;
+ }
+
+ if (srf_update->func_shaper) {
memcpy(&surface->in_shaper_func, srf_update->func_shaper,
sizeof(surface->in_shaper_func));
+ if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER)
+ surface->mcm_luts.shaper = &surface->in_shaper_func;
+ }
+
if (srf_update->lut3d_func)
memcpy(&surface->lut3d_func, srf_update->lut3d_func,
sizeof(surface->lut3d_func));
@@ -2886,10 +2956,17 @@ static void copy_surface_update_to_plane(
surface->sdr_white_level_nits =
srf_update->sdr_white_level_nits;
- if (srf_update->blend_tf)
+ if (srf_update->blend_tf) {
memcpy(&surface->blend_tf, srf_update->blend_tf,
sizeof(surface->blend_tf));
+ if (surface->mcm_lut1d_enable)
+ surface->mcm_luts.lut1d_func = &surface->blend_tf;
+ }
+
+ if (srf_update->cm2_params || srf_update->blend_tf)
+ surface->lut_bank_a = !surface->lut_bank_a;
+
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -2901,11 +2978,7 @@ static void copy_surface_update_to_plane(
if (srf_update->gamut_remap_matrix)
surface->gamut_remap_matrix =
*srf_update->gamut_remap_matrix;
- if (srf_update->cm2_params) {
- surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
- surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
- surface->mcm_luts = srf_update->cm2_params->cm2_luts;
- }
+
if (srf_update->cursor_csc_color_matrix)
surface->cursor_csc_color_matrix =
*srf_update->cursor_csc_color_matrix;
@@ -3037,6 +3110,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
if (update->scaler_sharpener_update)
stream->scaler_sharpener_update = *update->scaler_sharpener_update;
+ if (update->sharpening_required)
+ stream->sharpening_required = *update->sharpening_required;
}
static void backup_planes_and_stream_state(
@@ -3153,6 +3228,11 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
+ * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
+ * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
+ */
+ force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
if (update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
@@ -3225,8 +3305,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type != UPDATE_TYPE_MED)
continue;
- if (surface->update_flags.bits.clip_size_change ||
- surface->update_flags.bits.position_change) {
+ if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3625,6 +3704,10 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+
+ if (should_lock_all_pipes)
+ determine_pipe_unlock_order(dc, context);
if (dc->debug.fams2_config.bits.enable &&
dc->debug.fams2_config.bits.enable_offload_flip &&
@@ -3677,13 +3760,14 @@ static void commit_planes_for_stream_fast(struct dc *dc,
if (!pipe_ctx->plane_state)
continue;
- if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
+
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer != NULL &&
- !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
- /*triple buffer for VUpdate only*/
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
@@ -3742,6 +3826,8 @@ static void commit_planes_for_stream(struct dc *dc,
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
+ if (should_lock_all_pipes)
+ determine_pipe_unlock_order(dc, context);
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
@@ -3920,19 +4006,20 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->plane_state)
continue;
- if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer != NULL &&
- !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
- /*triple buffer for VUpdate only*/
- pipe_ctx->plane_state->triplebuffer_flips = true;
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
/* force vsync flip when reconfiguring pipes to prevent underflow */
plane_state->flip_immediate = false;
+ plane_state->triplebuffer_flips = false;
}
}
@@ -3953,7 +4040,6 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
-
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -4028,7 +4114,7 @@ static void commit_planes_for_stream(struct dc *dc,
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*only enable triplebuffer for fast_update*/
+ /*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
@@ -4777,6 +4863,11 @@ static bool update_planes_and_stream_v1(struct dc *dc,
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
+ * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
+ * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
+ */
+ force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
if (update_type >= UPDATE_TYPE_FULL) {
@@ -5065,11 +5156,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
return true;
}
+static void clear_update_flags(struct dc_surface_update *srf_updates,
+ int surface_count, struct dc_stream_state *stream)
+{
+ int i;
+
+ if (stream)
+ stream->update_flags.raw = 0;
+
+ for (i = 0; i < surface_count; i++)
+ if (srf_updates[i].surface)
+ srf_updates[i].surface->update_flags.raw = 0;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/*
* update planes and stream version 3 separates FULL and FAST updates
@@ -5086,10 +5192,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
* features as they are now transparent to the new sequence.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
- return update_planes_and_stream_v3(dc, srf_updates,
+ ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
- return update_planes_and_stream_v2(dc, srf_updates,
+ else
+ ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
+
+ return ret;
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -5099,6 +5211,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
@@ -5106,17 +5220,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
- update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ } else
+ ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -5976,7 +6090,12 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
{
struct dc_power_profile profile = { 0 };
- profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ if (!context || !context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
+ return profile;
+ struct dc *dc = context->clk_mgr->ctx->dc;
+
+ if (dc->res_pool->funcs->get_power_profile)
+ profile.power_level = dc->res_pool->funcs->get_power_profile(context);
return profile;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 7ee2be8f82c4..2fdcf8d59b9f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -497,6 +497,23 @@ void get_mclk_switch_visual_confirm_color(
}
}
+void get_cursor_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->cursor_position.enable) {
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ }
+}
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
@@ -1071,8 +1088,13 @@ void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_con
if (!pipe_ctx->stream)
continue;
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+ /* For full update we must wait for all double buffer updates, not just DRR updates. This
+ * is particularly important for minimal transitions. Only check for OTG_MASTER pipes,
+ * as non-OTG master pipes share the same OTG as their OTG master.
+ */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && dc->hwss.wait_for_all_pending_updates) {
+ dc->hwss.wait_for_all_pending_updates(pipe_ctx);
+ }
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c7599c40d4be..33125b95c3a1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -765,25 +765,6 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
-/*
- * This is a preliminary vp size calculation to allow us to check taps support.
- * The result is completely overridden afterwards.
- */
-static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
-{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-
- data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
- data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
- data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
- data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- swap(data->viewport.width, data->viewport.height);
- swap(data->viewport_c.width, data->viewport_c.height);
- }
-}
-
static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
struct rect rec;
@@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
+ struct scaling_taps temp = {0};
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1525,8 +1507,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_recout(pipe_ctx);
/* depends on pixel format */
calculate_scaling_ratios(pipe_ctx);
- /* depends on scaling ratios and recout, does not calculate offset yet */
- calculate_viewport_size(pipe_ctx);
/*
* LB calculations depend on vp size, h/v_active and scaling ratios
@@ -1547,6 +1527,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+ // get TAP value with 100x100 dummy data for max scaling quality; override
+ // if a new scaling quality is required
+ pipe_ctx->plane_res.scl_data.viewport.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport.height = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ temp = pipe_ctx->plane_res.scl_data.taps;
+
+ calculate_inits_and_viewports(pipe_ctx);
+
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@@ -1573,11 +1571,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
&plane_state->scaling_quality);
}
- /*
- * Depends on recout, scaling ratios, h_active and taps
- * May need to re-check lb size after this in some obscure scenario
- */
- if (res)
+ if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
+ pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
calculate_inits_and_viewports(pipe_ctx);
/*
@@ -4094,14 +4091,6 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
- /*
- * Only update link encoder to stream assignment after bandwidth validation passed.
- * TODO: Split out assignment and validation.
- */
- if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
- dc->res_pool->funcs->link_encs_assign(
- dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-
return result;
}
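The resource_build_scaling_params() rework above probes the optimal tap counts against a 100x100 dummy viewport, computes the real inits and viewports, re-queries the taps, and repeats calculate_inits_and_viewports() only when the tap counts moved between the two passes. A standalone toy of that changed-taps guard; the struct and helper names here are hypothetical, not DC code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct scaling_taps -- not DC's type. */
struct taps { int h_taps, v_taps, h_taps_c, v_taps_c; };

static bool taps_changed(const struct taps *a, const struct taps *b)
{
	return a->h_taps != b->h_taps || a->v_taps != b->v_taps ||
	       a->h_taps_c != b->h_taps_c || a->v_taps_c != b->v_taps_c;
}

int main(void)
{
	struct taps probe = { .h_taps = 4, .v_taps = 4, .h_taps_c = 2, .v_taps_c = 2 };
	struct taps real = probe;	/* pretend the re-query matched the probe */

	/* Mirrors the patch: only redo calculate_inits_and_viewports() when
	 * the optimal taps differ between the dummy and real viewport passes. */
	puts(taps_changed(&probe, &real) ? "recompute inits/viewports" : "keep first pass");
	return 0;
}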
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9a406d74c0dd..5d233c09d239 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -421,7 +421,6 @@ bool dc_stream_program_cursor_position(
/* apply/update visual confirm */
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) {
/* update software state */
- uint32_t color_value = MAX_TG_COLOR_VALUE;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -429,15 +428,7 @@ bool dc_stream_program_cursor_position(
/* adjust visual confirm color for all pipes with current stream */
if (stream == pipe_ctx->stream) {
- if (stream->cursor_position.enable) {
- pipe_ctx->visual_confirm_color.color_r_cr = color_value;
- pipe_ctx->visual_confirm_color.color_g_y = 0;
- pipe_ctx->visual_confirm_color.color_b_cb = 0;
- } else {
- pipe_ctx->visual_confirm_color.color_r_cr = 0;
- pipe_ctx->visual_confirm_color.color_g_y = 0;
- pipe_ctx->visual_confirm_color.color_b_cb = color_value;
- }
+ get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
/* programming hardware */
if (pipe_ctx->plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3992ad73165b..6d76dc110d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.301"
+#define DC_VER "3.2.306"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -227,6 +227,10 @@ struct dc_dmub_caps {
uint8_t fams_ver;
};
+struct dc_scl_caps {
+ bool sharpener_support;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -292,6 +296,7 @@ struct dc_caps {
bool sequential_ono;
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
+ struct dc_scl_caps scl_caps;
};
struct dc_bug_wa {
@@ -463,6 +468,7 @@ struct dc_config {
unsigned int enable_fpo_flicker_detection;
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
+ bool set_pipe_unlock_order;
};
enum visual_confirm {
@@ -1253,7 +1259,6 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
- uint32_t clip_size_change: 1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@@ -1355,6 +1360,7 @@ struct dc_plane_state {
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
+ int adaptive_sharpness_policy;
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
@@ -1461,6 +1467,7 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
struct dml2_configuration_options dml2_options;
@@ -1513,7 +1520,7 @@ struct dc_surface_update {
* change cm2_params.component_settings: Full update
* change cm2_params.cm2_luts: Fast update
*/
- struct dc_cm2_parameters *cm2_params;
+ const struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
};
@@ -1770,7 +1777,6 @@ struct dc_link {
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
- bool disable_assr_for_uhbr;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1786,6 +1792,7 @@ struct dc_link {
// BW ALLOCATION USB4 ONLY
struct dc_dpia_bw_alloc dpia_bw_alloc_config;
bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
};
/* Return an enumerated dc_link.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 1e7de0f03290..f0417ee6fcf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -519,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
union dmub_rb_cmd cmd = { 0 };
unsigned int panel_inst = 0;
- if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst))
+ if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
+ dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
return;
memset(&cmd, 0, sizeof(cmd));
@@ -1294,6 +1295,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
memset(&new_signals, 0, sizeof(new_signals));
+ new_signals.bits.allow_idle = 1; /* always set */
+
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
new_signals.bits.allow_pg = 1;
@@ -1389,7 +1392,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
*/
dc_dmub_srv->needs_idle_wake = false;
- if (prev_driver_signals.bits.allow_ips2 &&
+ if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
DC_LOG_IPS(
@@ -1450,7 +1453,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
- if (prev_driver_signals.bits.allow_ips1) {
+ if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
@@ -1862,3 +1865,81 @@ void dc_dmub_srv_fams2_passthrough_flip(
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
}
}
+
+bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+{
+ bool result;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
+ start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return result;
+}
+
+void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+{
+ uint32_t i;
+ enum dmub_gpint_command command_code;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ switch (output->ips_mode) {
+ case DMUB_IPS_MODE_IPS1_MAX:
+ command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS2:
+ command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS1_RCG:
+ command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS1_ONO2_ON:
+ command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
+ break;
+ default:
+ command_code = DMUB_GPINT__INVALID_COMMAND;
+ break;
+ }
+
+ if (command_code == DMUB_GPINT__INVALID_COMMAND)
+ return;
+
+ // send gpint commands and wait for ack
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
+ (uint16_t)(output->ips_mode),
+ &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->residency_percent = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
+ (uint16_t)(output->ips_mode),
+ &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->entry_counter = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
+ (uint16_t)(output->ips_mode),
+ &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_active_time_us[0] = 0;
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
+ (uint16_t)(output->ips_mode),
+ &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_active_time_us[1] = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
+ (uint16_t)(output->ips_mode),
+ &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_inactive_time_us[0] = 0;
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
+ (uint16_t)(output->ips_mode),
+ &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_inactive_time_us[1] = 0;
+
+ // NUM_IPS_HISTOGRAM_BUCKETS = 16
+ for (i = 0; i < 16; i++)
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->histogram[i] = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 42f0cb672d8b..10b48198b7a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -209,4 +209,43 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
int surface_count);
+
+/**
+ * struct ips_residency_info - struct containing info from dmub_ips_residency_stats
+ *
+ * @ips_mode: The IPS mode that the following stats pertain to
+ * @residency_percent: Time spent in the given IPS mode, in millipercent
+ * @entry_counter: The number of entries made into this IPS state
+ * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode
+ * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
+ * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode
+ * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
+ * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
+ */
+struct ips_residency_info {
+ enum dmub_ips_mode ips_mode;
+ unsigned int residency_percent;
+ unsigned int entry_counter;
+ unsigned int total_active_time_us[2];
+ unsigned int total_inactive_time_us[2];
+ unsigned int histogram[16];
+};
+
+/**
+ * dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
+ *
+ * @dc_dmub_srv: The DC DMUB service pointer
+ * @start_measurement: Describes whether to start or stop measurement
+ *
+ * Return: true if GPINT was sent successfully, false otherwise
+ */
+bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+
+/**
+ * dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ *
+ * @dc_dmub_srv: The DC DMUB service pointer
+ * @output: Output struct to copy the residency info to
+ */
+void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
#endif /* _DMUB_DC_SRV_H_ */
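Taken together, the two entry points above form a start/stop-then-query flow. Below is a hypothetical caller sketch, not part of this patch: it assumes only the declarations in this header, a valid service pointer, and the documented lo/hi split of the duration words.

/* Hypothetical caller -- not part of this patch. */
static void example_report_ips2_residency(struct dc_dmub_srv *srv)
{
	struct ips_residency_info info = { .ips_mode = DMUB_IPS_MODE_IPS2 };
	uint64_t active_us, inactive_us;

	if (!dc_dmub_srv_ips_residency_cntl(srv, true))	/* start measurement */
		return;

	/* ... run the workload of interest ... */

	dc_dmub_srv_ips_residency_cntl(srv, false);	/* stop measurement */
	dc_dmub_srv_ips_query_residency_info(srv, &info);

	/* Durations come back split into lo/hi 32-bit words; residency is
	 * reported in millipercent. */
	active_us = ((uint64_t)info.total_active_time_us[1] << 32) |
		    info.total_active_time_us[0];
	inactive_us = ((uint64_t)info.total_inactive_time_us[1] << 32) |
		      info.total_inactive_time_us[0];

	DC_LOG_IPS("IPS2: %u millipercent, %llu us active, %llu us inactive",
		   info.residency_percent,
		   (unsigned long long)active_us,
		   (unsigned long long)inactive_us);
}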
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 41bd95e9177a..8dd6eb044829 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1166,6 +1166,7 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
+ int8_t branch_vendor_specific_data[4];
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
@@ -1191,6 +1192,7 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ uint16_t edp_oled_emission_rate;
};
union dpcd_sink_ext_caps {
@@ -1204,7 +1206,7 @@ union dpcd_sink_ext_caps {
uint8_t oled : 1;
uint8_t reserved_2 : 1;
uint8_t miniled : 1;
- uint8_t reserved : 1;
+ uint8_t emission_output : 1;
} bits;
uint8_t raw;
};
@@ -1358,6 +1360,9 @@ struct dp_trace {
#ifndef DP_TUNNELING_IRQ
#define DP_TUNNELING_IRQ (1 << 5)
#endif
+#ifndef DP_BRANCH_VENDOR_SPECIFIC_START
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+#endif
/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
#ifndef DP_TUNNELING_CAPABILITIES
#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index 44afcd989224..bd37ec82b42d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -26,7 +26,6 @@
#ifndef _DC_PLANE_H_
#define _DC_PLANE_H_
-#include "dc.h"
#include "dc_hw_types.h"
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 603552dbd771..24aa9df892f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -38,6 +38,7 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
spl_scaling_quality->h_taps = scaling_quality->h_taps;
spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c;
spl_scaling_quality->v_taps = scaling_quality->v_taps;
+ spl_scaling_quality->integer_scaling = scaling_quality->integer_scaling;
}
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
@@ -187,14 +188,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
- spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy;
+ spl_in->sharpen_policy = (enum sharpen_policy)plane_state->adaptive_sharpness_policy;
spl_in->debug.scale_to_sharpness_policy =
(enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy;
/* Check if the stream is in fullscreen and if it's HDR.
* Use this to determine sharpness levels
*/
- spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->is_fullscreen = pipe_ctx->stream->sharpening_required;
spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
index caa45db50232..db1e63a7d460 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -26,7 +26,6 @@
#ifndef _DC_STATE_H_
#define _DC_STATE_H_
-#include "dc.h"
#include "inc/core_status.h"
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 14ea47eda0c8..413970588a26 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -143,6 +143,7 @@ union stream_update_flags {
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
uint32_t scaler_sharpener : 1;
+ uint32_t sharpening_required : 1;
} bits;
uint32_t raw;
@@ -310,6 +311,7 @@ struct dc_stream_state {
struct luminance_data lumin_data;
bool scaler_sharpener_update;
+ bool sharpening_required;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -356,6 +358,7 @@ struct dc_stream_update {
struct dc_cursor_position *cursor_position;
bool *hw_cursor_req;
bool *scaler_sharpener_update;
+ bool *sharpening_required;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6d7989b751e2..3401f4c9fb10 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -179,6 +179,7 @@ struct dc_panel_patch {
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
+ uint8_t blankstream_before_otg_off;
};
struct dc_edid_caps {
@@ -922,6 +923,12 @@ struct display_endpoint_id {
enum display_endpoint_type ep_type;
};
+enum backlight_control_type {
+ BACKLIGHT_CONTROL_PWM = 0,
+ BACKLIGHT_CONTROL_VESA_AUX = 1,
+ BACKLIGHT_CONTROL_AMD_AUX = 2,
+};
+
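For illustration only, a hypothetical helper mapping the new enum to readable names; no such helper exists in this patch:

/* Hypothetical debug helper -- not part of this patch. */
static inline const char *backlight_control_name(enum backlight_control_type t)
{
	switch (t) {
	case BACKLIGHT_CONTROL_PWM:      return "PWM";
	case BACKLIGHT_CONTROL_VESA_AUX: return "VESA AUX";
	case BACKLIGHT_CONTROL_AMD_AUX:  return "AMD AUX";
	default:                         return "unknown";
	}
}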
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
struct otg_phy_mux {
uint8_t phy_output_num;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 0b889004509a..d3e46c3cfa57 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk(
int otg_inst,
int dp_hpo_inst)
{
- /* set the dtbclk_p source */
- dccg401_set_dtbclk_p_src(dccg, src, otg_inst);
-
/* enabled to select one of the DTBCLKs for pipe */
if (src == REFCLK)
dccg401_disable_dpstreamclk(dccg, dp_hpo_inst);
@@ -805,33 +802,6 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- switch (link_enc_inst) {
- case 0:
- REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
- SYMCLKA_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1);
- break;
- case 1:
- REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
- SYMCLKB_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1);
- break;
- case 2:
- REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
- SYMCLKC_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1);
- break;
- case 3:
- REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
- SYMCLKD_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1);
- break;
- }
-
switch (stream_enc_inst) {
case 0:
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
@@ -864,37 +834,8 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
-/*get other front end connected to this backend*/
-static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst)
-{
- uint8_t num_enabled_symclk_fe = 0;
- uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0};
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- uint8_t i;
-
- REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0],
- SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
-
- REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1],
- SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
-
- REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2],
- SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
-
- REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3],
- SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
-
- for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) {
- if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
- num_enabled_symclk_fe++;
- }
-
- return num_enabled_symclk_fe;
-}
-
static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
- uint8_t num_enabled_symclk_fe = 0;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
switch (stream_enc_inst) {
@@ -919,31 +860,6 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
SYMCLKD_FE_SRC_SEL, 0);
break;
}
-
- /*check other enabled symclk fe connected to this be */
- num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst);
- /*only turn off backend clk if other front ends attached to this backend are all off,
- for mst, only turn off the backend if this is the last front end*/
- if (num_enabled_symclk_fe == 0) {
- switch (link_enc_inst) {
- case 0:
- REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
- SYMCLKA_CLOCK_ENABLE, 0);
- break;
- case 1:
- REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
- SYMCLKB_CLOCK_ENABLE, 0);
- break;
- case 2:
- REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
- SYMCLKC_CLOCK_ENABLE, 0);
- break;
- case 3:
- REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
- SYMCLKD_CLOCK_ENABLE, 0);
- break;
- }
- }
}
static const struct dccg_funcs dccg401_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index 8db9f7514466..889f314cac65 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -717,7 +717,7 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index eaed5d1c398a..dcd2cdfe91eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -365,23 +365,18 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
- /* 11 segments
- * segment is from 2^-10 to 2^1
+ /* 13 segments
+ * segment is from 2^-12 to 2^0
* There are less than 256 points, for optimization
*/
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 4;
- seg_distr[9] = 4;
- seg_distr[10] = 1;
-
- region_start = -10;
+ const uint8_t SEG_COUNT = 12;
+
+ for (i = 0; i < SEG_COUNT; i++)
+ seg_distr[i] = 4;
+
+ seg_distr[SEG_COUNT] = 1;
+
+ region_start = -SEG_COUNT;
region_end = 1;
}
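A quick point-budget check for the new layout (an editorial aside, not from the patch): each of the 12 segments with seg_distr[i] = 4 carries 2^4 = 16 curve points, and the final segment with seg_distr[12] = 1 carries 2^1 = 2, so the curve uses 12 * 16 + 2 = 194 points, within the "less than 256 points" budget the comment cites. The identical dcn30 change below has the same arithmetic.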
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index f31f0e3abfc0..1e1038fb04e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -140,23 +140,18 @@ bool cm3_helper_translate_curve_to_hw_format(
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
- /* 11 segments
- * segment is from 2^-10 to 2^0
+ /* 13 segments
+ * segment is from 2^-12 to 2^0
* There are less than 256 points, for optimization
*/
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 4;
- seg_distr[9] = 4;
- seg_distr[10] = 1;
-
- region_start = -10;
+ const uint8_t SEG_COUNT = 12;
+
+ for (i = 0; i < SEG_COUNT; i++)
+ seg_distr[i] = 4;
+
+ seg_distr[SEG_COUNT] = 1;
+
+ region_start = -SEG_COUNT;
region_end = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 5b343f745cf3..ae81451a3a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
+static bool enc314_is_fifo_enabled(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
+ return (reset_val != 0);
+}
+
void enc314_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
@@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.enable_fifo = enc314_enable_fifo,
.disable_fifo = enc314_disable_fifo,
+ .is_fifo_enabled = enc314_is_fifo_enabled,
.set_input_mode = enc314_set_dig_input_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 565f3c492477..0c8c4a080c50 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -785,12 +785,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 9d6675ecc5f1..c935903b68e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -845,12 +845,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 4fce64a030b6..390c1a77fda6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 3fa9a5da02f6..843d6004258c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index eb3ed965e48b..cd8cca651419 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -1049,12 +1049,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 9e1c18b90805..5718000627b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -435,8 +435,6 @@ static void get_meta_and_pte_attr(
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -485,8 +483,6 @@ static void get_meta_and_pte_attr(
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 1c10ba4dcdde..cee1b351e105 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1280,12 +1280,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -1775,15 +1772,6 @@ static unsigned int CalculateVMAndRowBytes(
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeightPTEs = 1;
- *PixelPTEReqHeight = MacroTileHeight;
- *PixelPTEReqWidth = 8 * *MacroTileWidth;
- *PTERequestSize = 64;
- if (ScanDirection != dm_vert)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index b28fcc8608ff..76d3bb3c9155 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -392,8 +392,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double)blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -464,8 +462,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 2b275e680379..f567a9023682 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1444,12 +1444,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index b57b095cd4a8..c46bda2141ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -413,8 +413,6 @@ static void get_meta_and_pte_attr(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -481,8 +479,6 @@ static void get_meta_and_pte_attr(
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index debfa31583a6..5865e8fa2d8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1461,12 +1461,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 61b3bebf24c9..b7d2a0caec11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -501,8 +501,6 @@ static void get_meta_and_pte_attr(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -569,8 +567,6 @@ static void get_meta_and_pte_attr(
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index d92fb428ee96..86ac7d59fd32 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -4097,12 +4097,9 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index a201dbb743d7..d9e63c4fdd95 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -204,8 +204,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.num_states = 8,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 250.0,
- .sr_enter_plus_exit_z8_time_us = 350.0,
+ .sr_exit_z8_time_us = 263.0,
+ .sr_enter_plus_exit_z8_time_us = 363.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index d8bfc85e5dcd..88dc2b97e7bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -559,12 +559,11 @@ static void get_surf_rq_param(
const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
bool surf_linear;
bool surf_vert;
unsigned int bytes_per_element;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 8697eac1e1f7..7a01a956e4bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -1036,6 +1036,7 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported;
context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
+ context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
}
void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index d35dd507cb9f..bbc28b9a15a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -13,11 +13,11 @@
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+ (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -27,7 +27,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+ (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 0aa4e4d343b0..3d41ffde91c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -159,6 +159,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
phantom->timing.v_total = meta->v_total;
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
phantom->timing.drr_config.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index ab229e1598ae..714b5c39b7e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -425,6 +425,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
phantom->timing.v_total = meta->v_total;
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
phantom->timing.drr_config.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 1cf9015e854a..5a09dd298e6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1798,6 +1798,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
}
if (s->pmo_dcn4.num_pstate_candidates > 0) {
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true;
s->pmo_dcn4.cur_pstate_candidate = -1;
return true;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 866b0abcff1b..9190c1328d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -209,8 +209,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;
if (odms_needed <= unused_dpps) {
- unused_dpps -= odms_needed;
-
if (odms_needed == 1) {
p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
optimization_done = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index cd1706d301e7..f09cba8e29cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -690,6 +690,7 @@ struct dcn20_dpp {
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
+ bool dispclk_r_gate_disable;
struct scaler_data scl_data;
struct pwl_params pwl_data;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index b110f35ef66b..f236824126e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -572,6 +572,7 @@ struct dcn3_dpp {
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
+ bool dispclk_r_gate_disable;
struct scaler_data scl_data;
struct pwl_params pwl_data;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 8473c694bfdc..62b7012cda43 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -50,13 +50,21 @@ void dpp35_dppclk_control(
DPPCLK_RATE_CONTROL, dppclk_div,
DPP_CLOCK_ENABLE, 1);
else
- REG_UPDATE_2(DPP_CONTROL,
+ if (dpp->dispclk_r_gate_disable)
+ REG_UPDATE_2(DPP_CONTROL,
DPP_CLOCK_ENABLE, 1,
DISPCLK_R_GATE_DISABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 1);
} else
- REG_UPDATE_2(DPP_CONTROL,
+ if (dpp->dispclk_r_gate_disable)
+ REG_UPDATE_2(DPP_CONTROL,
DPP_CLOCK_ENABLE, 0,
DISPCLK_R_GATE_DISABLE, 0);
+ else
+ REG_UPDATE(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 0);
}
void dpp35_program_bias_and_scale_fcnv(
@@ -128,6 +136,10 @@ bool dpp35_construct(
(const struct dcn3_dpp_mask *)(tf_mask));
dpp->base.funcs = &dcn35_dpp_funcs;
+
+	// Workaround for cursor memory getting stuck in light sleep (LS): program DISPCLK_R_GATE_DISABLE; limit the w/a to the affected ASIC revs
+ if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ dpp->dispclk_r_gate_disable = true;
return ret;
}
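
Taken together, the two hunks above make the DISPCLK_R_GATE_DISABLE writes conditional: the bit is only toggled when dispclk_r_gate_disable was latched at construction time for hw_internal_rev <= 0x10. A standalone model of the resulting decision structure, with register writes replaced by prints (the names below are stand-ins, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the branching in dpp35_dppclk_control(); printf stands in
     * for the REG_UPDATE / REG_UPDATE_2 register writes. */
    static void dppclk_control(bool enable, bool dppclk_div, bool gate_wa)
    {
        if (enable) {
            if (dppclk_div)
                printf("DPPCLK_RATE_CONTROL=div, DPP_CLOCK_ENABLE=1\n");
            else if (gate_wa)
                printf("DPP_CLOCK_ENABLE=1, DISPCLK_R_GATE_DISABLE=1\n");
            else
                printf("DPP_CLOCK_ENABLE=1\n");
        } else {
            if (gate_wa)
                printf("DPP_CLOCK_ENABLE=0, DISPCLK_R_GATE_DISABLE=0\n");
            else
                printf("DPP_CLOCK_ENABLE=0\n");
        }
    }

    int main(void)
    {
        /* gate_wa mirrors dpp->dispclk_r_gate_disable, latched in
         * dpp35_construct() only when hw_internal_rev <= 0x10 */
        bool gate_wa = true;
        dppclk_control(true, false, gate_wa);   /* w/a path on enable  */
        dppclk_control(false, false, gate_wa);  /* w/a path on disable */
        dppclk_control(true, false, false);     /* plain enable        */
        return 0;
    }
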
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f344478e9bd4..b099989d9364 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -443,7 +443,6 @@ struct gpio *dal_gpio_create_irq(
case GPIO_ID_GPIO_PAD:
break;
default:
- id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4fbed0298adf..c31ec44ccd8c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -3142,9 +3142,10 @@ static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
}
bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *params)
{
+ uint32_t backlight_pwm_u16_16 = params->backlight_pwm_u16_16;
+ uint32_t frame_ramp = params->frame_ramp;
struct dc_link *link = pipe_ctx->stream->link;
struct dc *dc = link->ctx->dc;
struct abm *abm = pipe_ctx->stream_res.abm;
@@ -3315,7 +3316,7 @@ void dce110_disable_link_output(struct dc_link *link,
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
- if (dmcu != NULL && dmcu->funcs->lock_phy)
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index ed3cc3648e8e..06789ac3a224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -88,8 +88,7 @@ void dce110_edp_wait_for_hpd_ready(
bool power_up);
bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
void dce110_disable_link_output(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index a6a1db5ba8ba..681bb92c6069 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3453,7 +3453,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
- split_pipe = test_pipe;
/**
* There is another half plane on same layer because of
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index a80c08582932..05424a9af58b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1458,8 +1458,12 @@ void dcn20_pipe_control_lock(
} else {
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- else
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ else {
+ if (dc->hwseq->funcs.perform_3dlut_wa_unlock)
+ dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
}
}
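
The unlock path now dispatches through an optional hook: generations that install perform_3dlut_wa_unlock (DCN4.01, later in this patch) get the workaround sequence, everyone else keeps the plain OTG unlock. A minimal sketch of this optional-callback-with-fallback pattern, with hypothetical types standing in for the hwseq structures:

    #include <stdio.h>

    /* Hypothetical stand-in for the hwseq private function table. */
    struct hwseq_funcs {
        void (*perform_3dlut_wa_unlock)(int pipe);  /* optional hook */
    };

    static void plain_unlock(int pipe) { printf("plain unlock, pipe %d\n", pipe); }
    static void wa_unlock(int pipe)    { printf("3DLUT w/a unlock, pipe %d\n", pipe); }

    static void pipe_control_unlock(const struct hwseq_funcs *funcs, int pipe)
    {
        if (funcs->perform_3dlut_wa_unlock)   /* hook installed?    */
            funcs->perform_3dlut_wa_unlock(pipe);
        else
            plain_unlock(pipe);               /* original behavior  */
    }

    int main(void)
    {
        struct hwseq_funcs dcn401_like = { .perform_3dlut_wa_unlock = wa_unlock };
        struct hwseq_funcs dcn20_like  = { 0 };

        pipe_control_unlock(&dcn401_like, 0);
        pipe_control_unlock(&dcn20_like, 0);
        return 0;
    }
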
@@ -1732,7 +1736,6 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
- plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@@ -1745,7 +1748,6 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
- (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@@ -2056,22 +2058,15 @@ void dcn20_program_front_end_for_ctx(
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
- if (dc->hwss.blank_phantom) {
- int main_pipe_width = 0, main_pipe_height = 0;
- struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
-
- if (phantom_stream) {
- main_pipe_width = phantom_stream->dst.width;
- main_pipe_height = phantom_stream->dst.height;
- }
-
- dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ if (dc->hwseq->funcs.blank_pixel_data) {
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
}
tg->funcs->enable_crtc(tg);
}
@@ -2255,9 +2250,9 @@ void dcn20_post_unlock_program_front_end(
struct timing_generator *tg = pipe->stream_res.tg;
- if (tg->funcs->get_double_buffer_pending) {
+ if (tg->funcs->get_optc_double_buffer_pending) {
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && tg->funcs->get_double_buffer_pending(tg); j++)
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
udelay(polling_interval_us);
}
}
@@ -2771,7 +2766,6 @@ void dcn20_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- int i;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -2838,19 +2832,16 @@ void dcn20_reset_back_end_for_pipe(
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
- break;
-
- if (i == dc->res_pool->pipe_count)
- return;
-
/*
* In case of a dangling plane, setting this to NULL unconditionally
* causes failures during reset hw ctx where, if stream is NULL,
* it is expected that the pipe_ctx pointers to pipes and plane are NULL.
*/
pipe_ctx->stream = NULL;
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 1ea95f8d4cbc..630e05f32c80 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -242,14 +242,15 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
}
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *params)
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
uint32_t otg_inst;
+ uint32_t backlight_pwm_u16_16 = params->backlight_pwm_u16_16;
+ uint32_t frame_ramp = params->frame_ramp;
if (!abm || !tg || !panel_cntl)
return false;
@@ -257,7 +258,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
otg_inst = tg->inst;
if (dc->dc->res_pool->dmcu) {
- dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
+ dce110_set_backlight_level(pipe_ctx, params);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
index 9cee9bdb8de9..a7eaaa4596be 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
@@ -50,8 +50,7 @@ void dcn21_PLAT_58856_wa(struct dc_state *context,
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx);
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
bool dcn21_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index bded33575493..e89ebfda4873 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -245,6 +245,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = false;
int acquired_rmu = 0;
@@ -283,8 +284,14 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
stream->lut3d_func->state.bits.rmu_mux_num);
+ if (!result)
+ DC_LOG_ERROR("%s: program_3dlut failed\n", __func__);
+
result = mpc->funcs->program_shaper(mpc, shaper_lut,
stream->lut3d_func->state.bits.rmu_mux_num);
+ if (!result)
+ DC_LOG_ERROR("%s: program_shaper failed\n", __func__);
+
} else {
// loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
@@ -486,7 +493,6 @@ bool dcn30_mmhubbub_warmup(
}
/*following is the original: warmup each DWB's mcif buffer*/
for (i = 0; i < num_dwb; i++) {
- dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
/*warmup is for VM mode only*/
if (wb_info[i].mcif_buf_params.p_vmid == 0)
@@ -1185,3 +1191,30 @@ void dcn30_prepare_bandwidth(struct dc *dc,
if (!dc->clk_mgr->clks.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
}
+
+void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ bool pending_updates = false;
+ unsigned int i;
+
+ if (tg && tg->funcs->is_tg_enabled(tg)) {
+ // Poll for 100ms maximum
+ for (i = 0; i < 100000; i++) {
+ pending_updates = false;
+ if (tg->funcs->get_optc_double_buffer_pending)
+ pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg);
+
+ if (tg->funcs->get_otg_double_buffer_pending)
+ pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg);
+
+ if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state)
+ pending_updates |= tg->funcs->get_pipe_update_pending(tg);
+
+ if (!pending_updates)
+ break;
+
+ udelay(1);
+ }
+ }
+}
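
dcn30_wait_for_all_pending_updates bounds its wait at 100000 iterations of udelay(1), roughly 100 ms, and exits as soon as none of the double-buffer status queries report a pending update. A self-contained model of that bounded-poll shape, with a simulated status read in place of the OPTC/OTG register queries:

    #include <stdbool.h>
    #include <stdio.h>

    /* simulated status read: reports "pending" for the first n polls */
    static bool status_pending(int i, int clears_at) { return i < clears_at; }

    /* bounded poll: up to 100000 ~1us steps (~100 ms), early exit on clear */
    static bool wait_pending_clear(int clears_at)
    {
        for (int i = 0; i < 100000; i++) {
            if (!status_pending(i, clears_at))
                return true;          /* all double-buffer updates latched */
            /* udelay(1) would sit here in the driver */
        }
        return false;                 /* gave up after ~100 ms */
    }

    int main(void)
    {
        printf("clears quickly: %d\n", wait_pending_clear(10));     /* 1 */
        printf("never clears: %d\n", wait_pending_clear(200000));   /* 0 */
        return 0;
    }
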
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 6a153e7ce910..4b90b781c4f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -96,4 +96,6 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2a8dc40d2847..0e8d32e3dbae 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -108,7 +108,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
- .is_abm_supported = dcn21_is_abm_supported
+ .is_abm_supported = dcn21_is_abm_supported,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 93e49d87a67c..780ce4c064aa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -107,6 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
index 0bca48ccbfa2..a6e0115a53ee 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
@@ -23,8 +23,8 @@
*
*/
-#ifndef __DC_DCN30_INIT_H__
-#define __DC_DCN30_INIT_H__
+#ifndef __DC_DCN301_INIT_H__
+#define __DC_DCN301_INIT_H__
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 3d4b31bd9946..bfc78a42bc2a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -517,6 +517,11 @@ static void dcn31_reset_back_end_for_pipe(
dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ if ((!pipe_ctx->stream->dpms_off || pipe_ctx->stream->link->link_status.link_active)
+ && pipe_ctx->stream->sink && pipe_ctx->stream->sink->edid_caps.panel_patch.blankstream_before_otg_off) {
+ dc->hwss.blank_stream(pipe_ctx);
+ }
+
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 4e93eeedfc1b..9b88eb72086d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider(
}
}
+static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe)
+{
+ return pipe && pipe->stream
+ // Check dig's otg instance.
+ && pipe->stream_res.stream_enc
+ && pipe->stream_res.stream_enc->funcs->dig_source_otg
+ && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc)
+ && pipe->stream->link && pipe->stream->link->link_enc
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc);
+}
+
void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
@@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) &&
+ !pipe->stream->apply_seamless_boot_optimization &&
+ !pipe->stream->apply_edp_fast_boot_optimization) {
+ if (dcn314_is_pipe_dig_fifo_on(pipe))
+ continue;
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -478,7 +496,7 @@ void dcn314_disable_link_output(struct dc_link *link,
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
- if (dmcu != NULL && dmcu->funcs->lock_phy)
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 2e8c9f738259..d7f8b2dcaa6b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -439,6 +439,7 @@ bool dcn32_set_mpc_shaper_3dlut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = false;
@@ -458,13 +459,13 @@ bool dcn32_set_mpc_shaper_3dlut(
if (stream->lut3d_func &&
stream->lut3d_func->state.bits.initialized == 1) {
- result = mpc->funcs->program_3dlut(mpc,
- &stream->lut3d_func->lut_3d,
- mpcc_id);
+ result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id);
+ if (!result)
+ DC_LOG_ERROR("%s: program_3dlut failed\n", __func__);
- result = mpc->funcs->program_shaper(mpc,
- shaper_lut,
- mpcc_id);
+ result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id);
+ if (!result)
+ DC_LOG_ERROR("%s: program_shaper failed\n", __func__);
}
return result;
@@ -1398,10 +1399,10 @@ void dcn32_disable_link_output(struct dc_link *link,
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_backlight_control &&
+ link->dc->hwss.edp_power_control &&
!link->skip_implict_edp_power_control)
link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ else if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
@@ -1698,52 +1699,6 @@ void dcn32_init_blank(
hws->funcs.wait_for_blank_complete(opp);
}
-void dcn32_blank_phantom(struct dc *dc,
- struct timing_generator *tg,
- int width,
- int height)
-{
- struct dce_hwseq *hws = dc->hwseq;
- enum dc_color_space color_space;
- struct tg_color black_color = {0};
- struct output_pixel_processor *opp = NULL;
- uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
- uint32_t i;
-
- /* program opp dpg blank color */
- color_space = COLOR_SPACE_SRGB;
- color_space_to_black_color(dc, color_space, &black_color);
-
- otg_active_width = width;
- otg_active_height = height;
-
- /* get the OPTC source */
- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
-
- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
- opp = dc->res_pool->opps[i];
- break;
- }
- }
-
- if (opp && opp->funcs->opp_set_disp_pattern_generator)
- opp->funcs->opp_set_disp_pattern_generator(
- opp,
- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_UNDEFINED,
- &black_color,
- otg_active_width,
- otg_active_height,
- 0);
-
- if (tg->funcs->is_tg_enabled(tg))
- hws->funcs.wait_for_blank_complete(opp);
-}
-
/* phantom stream id's can change often, but can be identical between contexts.
* This function checks for the condition the streams are identical to avoid
* redundant pipe transitions.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cac4a08b92a4..0303a5953673 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -119,11 +119,6 @@ void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
-void dcn32_blank_phantom(struct dc *dc,
- struct timing_generator *tg,
- int width,
- int height);
-
bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 3422b564ae98..dbcd2dfb19c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -117,10 +117,10 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
- .blank_phantom = dcn32_blank_phantom,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 2bbf1fef94fd..55dc5799e725 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -123,7 +123,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
- .program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index d00822e8daa5..a93864b63d48 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -122,7 +122,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
- .program_outstanding_updates = dcn32_program_outstanding_updates,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 0b743669f23b..3c70f40bf047 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -506,7 +506,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
/* 1D LUT */
- if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) {
+ if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
@@ -521,7 +521,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
}
if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id);
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
}
/* Shaper */
@@ -669,11 +669,17 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
+ struct mpc *mpc = dc->res_pool->mpc;
bool result;
const struct pwl_params *lut_params = NULL;
bool rval;
+ if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
+ dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
+ return true;
+ }
+
mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
// 1D LUT
@@ -844,6 +850,13 @@ enum dc_status dcn401_enable_stream_timing(
odm_slice_width, last_odm_slice_width);
}
+ /* set DTBCLK_P */
+ if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
+ if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
+ }
+ }
+
/* HW program guide assume display already disable
* by unplug sequence. OTG assume stop.
*/
@@ -1004,8 +1017,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
- /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */
- dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst);
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
@@ -1063,7 +1074,6 @@ static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
- split_pipe = test_pipe;
/**
* There is another half plane on same layer because of
@@ -1097,6 +1107,58 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct
}
}
+static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
+{
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ link_encoding,
+ &pipe_ctx->pll_settings);
+ break;
+ }
+ }
+}
+
+void dcn401_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
+ disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ } else {
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -1669,7 +1731,7 @@ void dcn401_hardware_release(struct dc *dc)
}
}
-void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
+void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -1695,6 +1757,9 @@ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context,
hubbub->funcs->wait_for_det_update)
hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
}
+ } else {
+ if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
+ hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
}
}
}
@@ -1705,7 +1770,6 @@ void dcn401_interdependent_update_lock(struct dc *dc,
unsigned int i = 0;
struct pipe_ctx *pipe = NULL;
struct timing_generator *tg = NULL;
- bool pipe_unlocked[MAX_PIPES] = {0};
if (lock) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1719,48 +1783,91 @@ void dcn401_interdependent_update_lock(struct dc *dc,
dc->hwss.pipe_control_lock(dc, pipe, true);
}
} else {
- /* Unlock pipes based on the change in DET allocation instead of pipe index
- * Prevents over allocation of DET during unlock process
- * e.g. 2 pipe config with different streams with a max of 20 DET segments
- * Before: After:
- * - Pipe0: 10 DET segments - Pipe0: 12 DET segments
- * - Pipe1: 10 DET segments - Pipe1: 8 DET segments
- * If Pipe0 gets updated first, 22 DET segments will be allocated
- */
+	/* Free the DET currently in use first and let those pipes update, then unlock the remaining pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
- int current_pipe_idx = i;
if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!tg->funcs->is_tg_enabled(tg) ||
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
- pipe_unlocked[i] = true;
continue;
}
- // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
- struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
-
- if (old_otg_master)
- current_pipe_idx = old_otg_master->pipe_idx;
- if (resource_calculate_det_for_stream(context, pipe) <
- resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
+ if (dc->scratch.pipes_to_unlock_first[i]) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
dc->hwss.pipe_control_lock(dc, pipe, false);
- pipe_unlocked[i] = true;
- dcn401_wait_for_det_buffer_update(dc, context, pipe);
+	/* Assumes the pipe at the same index in current_state is also an OTG_MASTER pipe */
+ dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
}
}
+ /* Unlocking the rest of the pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (pipe_unlocked[i])
+ if (dc->scratch.pipes_to_unlock_first[i])
continue;
+
pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ continue;
+ }
+
dc->hwss.pipe_control_lock(dc, pipe, false);
}
}
}
+void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
+{
+ /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
+ * HUBP will properly fetch 3DLUT contents after unlock.
+ *
+ * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
+ * of whether OTG lock is currently being held or not.
+ */
+ struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
+ struct pipe_ctx *odm_pipe, *mpc_pipe;
+ int i, wa_pipe_ct = 0;
+
+ for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
+ for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
+ if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
+ == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
+ && mpc_pipe->plane_state->mcm_shaper_3dlut_setting
+ == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
+ wa_pipes[wa_pipe_ct++] = mpc_pipe;
+ }
+ }
+ }
+
+ if (wa_pipe_ct > 0) {
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
+
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
+ pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
+
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ }
+
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
+ } else {
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ }
+}
+
void dcn401_program_outstanding_updates(struct dc *dc,
struct dc_state *context)
{
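
The removed comment's worked example is worth keeping in mind: with a 20-segment DET shared by two pipes moving from 10/10 to 12/8 segments, unlocking by pipe index transiently asks for 22 segments. The new scratch.pipes_to_unlock_first ordering releases shrinking pipes and waits for the DET update before the growing pipes unlock. The arithmetic, as a runnable check:

    #include <stdio.h>

    int main(void)
    {
        int det_max = 20;
        int before0 = 10, before1 = 10;  /* current DET segments per pipe */
        int after0 = 12, after1 = 8;     /* target DET segments per pipe  */

        /* index order: pipe 0 grows before pipe 1 has shrunk */
        int naive = after0 + before1;    /* 12 + 10 = 22 > 20: over-allocated */
        /* shrink-first order: pipe 1 frees segments, then pipe 0 grows */
        int safe = before0 + after1;     /* 10 + 8 = 18 <= 20: fine */

        printf("naive transient %d/%d, shrink-first transient %d/%d\n",
               naive, det_max, safe, det_max);
        return 0;
    }
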
@@ -1770,3 +1877,125 @@ void dcn401_program_outstanding_updates(struct dc *dc,
if (hubbub->funcs->program_compbuf_segments)
hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
}
+
+void dcn401_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+	/* DPMS may already be disabled, or the dpms_off status may be
+	 * incorrect due to the fastboot feature: when the system resumes
+	 * from S4 with only a second screen, dpms_off is true even though
+	 * VBIOS lit up eDP, so check the link status too.
+	 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
+ }
+
+	/* By the caller's loop order, the parent pipe (pipe0) is reset last.
+	 * The back end is shared by all pipes and is disabled only when the
+	 * parent pipe is disabled.
+	 */
+ if (pipe_ctx->top_pipe == NULL) {
+
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
+
+ /* reset DTBCLK_P */
+ if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
+ dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
+ }
+
+/*
+ * In case of a dangling plane, setting this to NULL unconditionally
+ * causes failures during reset hw ctx where, if stream is NULL,
+ * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
+ */
+ pipe_ctx->stream = NULL;
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+void dcn401_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index a27e62081685..28a513dfc005 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -55,6 +55,10 @@ void dcn401_populate_mcm_luts(struct dc *dc,
bool lut_bank_a);
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+void dcn401_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
@@ -81,7 +85,16 @@ void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
-void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
+void dcn401_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn401_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index a2ca07235c83..c73305e57d39 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -84,7 +84,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .disable_link_output = dcn32_disable_link_output,
+ .disable_link_output = dcn401_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.enable_phantom_streams = dcn32_enable_phantom_streams,
@@ -93,13 +93,13 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
- .blank_phantom = dcn32_blank_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
@@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
- .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -136,8 +136,9 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
- .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
+ .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
+ .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index ac9205625623..1df17c54f3a9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -174,6 +174,11 @@ union block_sequence_params {
struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
};
+struct set_backlight_level_params {
+ uint32_t backlight_pwm_u16_16;
+ uint32_t frame_ramp;
+};
+
enum block_sequence_func {
DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0,
OPTC_PIPE_CONTROL_LOCK,
@@ -365,8 +370,7 @@ struct hw_sequencer_funcs {
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
@@ -462,6 +466,7 @@ struct hw_sequencer_funcs {
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
+ void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx);
};
void color_space_to_black_color(
@@ -504,6 +509,10 @@ void get_mclk_switch_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_cursor_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
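
The new set_backlight_level_params struct replaces two positional uint32_t arguments across every hwseq implementation, so future fields (this series also carries brightness-control updates) extend the struct instead of every signature. A minimal sketch of the pattern; the percent conversion is illustrative, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of the new params struct; the field meanings
     * follow the old positional arguments. */
    struct set_backlight_level_params {
        uint32_t backlight_pwm_u16_16;  /* brightness, 16.16 fixed point */
        uint32_t frame_ramp;            /* ramp duration (0 = immediate) */
    };

    /* stand-in for a hwseq set_backlight_level implementation */
    static int set_backlight_level(const struct set_backlight_level_params *p)
    {
        unsigned pct = (unsigned)(((uint64_t)p->backlight_pwm_u16_16 * 100) >> 16);
        printf("backlight=%u%% ramp=%u\n", pct, p->frame_ramp);
        return 1;
    }

    int main(void)
    {
        struct set_backlight_level_params params = {
            .backlight_pwm_u16_16 = 0x8000,  /* 0.5 in 16.16 -> 50% */
            .frame_ramp = 0,
        };
        return !set_backlight_level(&params);
    }
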
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 0ac675456979..22a5d4a03c98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -182,6 +182,7 @@ struct hwseq_private_funcs {
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
+ void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index bfb8b8502d20..8597e866bfe6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -215,6 +215,10 @@ struct resource_funcs {
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
+	/*
+	 * Get a power-profile indicator for a context that has gone through
+	 * full validation.
+	 */
+ int (*get_power_profile)(const struct dc_state *context);
};
struct audio_support{
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 3d4c8bd42b49..b74e18cc1e66 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -342,7 +342,11 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
- bool (*get_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_pipe_update_pending)(struct timing_generator *tg);
+ void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
+ bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index d21ee9d12d26..e026c728042a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -48,6 +48,9 @@
#include "dm_helpers.h"
#include "clk_mgr.h"
+ // Offset DPCD 050Eh == 0x5A
+#define MST_HUB_ID_0x5A 0x5A
+
#define DC_LOGGER \
link->ctx->logger
#define DC_LOGGER_INIT(logger)
@@ -692,6 +695,15 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
link->wa_flags.dpia_mst_dsc_always_on = true;
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_vendor_specific_data[2] == MST_HUB_ID_0x5A &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) {
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+ }
}
static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
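
The new workaround entry keys on branch_vendor_specific_data[2]. Assuming DP_BRANCH_VENDOR_SPECIFIC_START is DPCD address 0x50C (the conventional branch vendor-specific base; the definition is not shown in this diff), index 2 lands on address 0x50E, which is what the "Offset DPCD 050Eh == 0x5A" comment refers to:

    #include <stdio.h>

    int main(void)
    {
        unsigned vendor_specific_start = 0x50C;  /* assumed base address */
        unsigned index = 2;                      /* branch_vendor_specific_data[2] */
        printf("DPCD %03Xh\n", vendor_specific_start + index);  /* DPCD 50Eh */
        return 0;
    }
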
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index d78c8ec4de79..e05b8fddf2af 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1207,6 +1207,13 @@ static void get_active_converter_info(
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_VENDOR_SPECIFIC_START,
+ (uint8_t *)link->dpcd_caps.branch_vendor_specific_data,
+ sizeof(link->dpcd_caps.branch_vendor_specific_data));
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
union dp_dfp_cap_ext dfp_cap_ext;
@@ -1409,7 +1416,8 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
if (!link->ctx->dmub_srv ||
link->ep_type != DISPLAY_ENDPOINT_PHY ||
- link->link_enc->features.flags.bits.DP_IS_USB_C == 0)
+ link->link_enc->features.flags.bits.DP_IS_USB_C == 0 ||
+ link->link_enc->features.flags.bits.IS_DP2_CAPABLE == 0)
return false;
memset(&cmd, 0, sizeof(cmd));
@@ -1422,7 +1430,9 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cable_id->raw = cmd.cable_id.data.output_raw;
DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
}
- return cmd.cable_id.header.ret_status == 1;
+
+ ASSERT(cmd.cable_id.header.ret_status);
+ return true;
}
static void retrieve_cable_id(struct dc_link *link)
@@ -1626,6 +1636,8 @@ static bool retrieve_link_cap(struct dc_link *link)
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read tunneling device data failed.\n", __func__);
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
@@ -1842,6 +1854,9 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_FEC_CAPABILITY,
&link->dpcd_caps.fec_cap.raw,
sizeof(link->dpcd_caps.fec_cap.raw));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_FEC_CAPABILITY) failed\n", __func__, __LINE__);
+
status = core_link_read_dpcd(
link,
DP_DSC_SUPPORT,
@@ -1864,6 +1879,9 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_DSC_BRANCH_OVERALL_THROUGHPUT_0) failed\n", __func__, __LINE__);
+
DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
@@ -2055,6 +2073,14 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE,
&link->dpcd_caps.pr_info.max_deviation_line,
sizeof(link->dpcd_caps.pr_info.max_deviation_line));
+
+ /*
+ * OLED Emission Rate info
+ */
+ if (link->dpcd_sink_ext_caps.bits.emission_output)
+ core_link_read_dpcd(link, DP_SINK_EMISSION_RATE,
+ (uint8_t *)&link->dpcd_caps.edp_oled_emission_rate,
+ sizeof(link->dpcd_caps.edp_oled_emission_rate));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2103,6 +2129,8 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
/* get max link encoder capability */
if (link_enc)
link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+ else
+ return max_link_cap;
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2136,10 +2164,15 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
*/
cable_max_link_rate = get_cable_max_link_rate(link);
- if (!link->dc->debug.ignore_cable_id &&
- cable_max_link_rate != LINK_RATE_UNKNOWN) {
- if (cable_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = cable_max_link_rate;
+ if (!link->dc->debug.ignore_cable_id) {
+ if (cable_max_link_rate != LINK_RATE_UNKNOWN)
+ // cable max link rate known
+ max_link_cap.link_rate = MIN(max_link_cap.link_rate, cable_max_link_rate);
+ else if (link_enc->funcs->is_in_alt_mode && link_enc->funcs->is_in_alt_mode(link_enc))
+ // cable max link rate ambiguous, DP alt mode, limit to HBR3
+ max_link_cap.link_rate = MIN(max_link_cap.link_rate, LINK_RATE_HIGH3);
+ //else {}
+ // cable max link rate ambiguous, DP, do nothing
if (!link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY &&
link->dpcd_caps.cable_id.bits.CABLE_TYPE >= 2)
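
The rework turns the cable-id clamp into three explicit cases: a known cable rate clamps via MIN(); an unknown rate while the PHY is in DP alt mode caps at HBR3; an unknown rate on a native DP connector leaves the encoder cap untouched. A decision-table model using illustrative Gbps values rather than the driver's LINK_RATE_* codes:

    #include <stdio.h>

    /* sketch of the clamp decision; rates in Gbps for illustration only */
    static double clamp_link_rate(double enc_max, double cable_max,
                                  int cable_known, int dp_alt_mode)
    {
        const double HBR3 = 8.1;

        if (cable_known)
            return cable_max < enc_max ? cable_max : enc_max;  /* MIN()      */
        if (dp_alt_mode)
            return enc_max < HBR3 ? enc_max : HBR3;            /* cap @ HBR3 */
        return enc_max;                     /* native DP: trust encoder cap */
    }

    int main(void)
    {
        printf("%.1f\n", clamp_link_rate(20.0, 10.0, 1, 0));  /* 10.0 */
        printf("%.1f\n", clamp_link_rate(20.0, 0.0, 0, 1));   /* 8.1  */
        printf("%.1f\n", clamp_link_rate(20.0, 0.0, 0, 0));   /* 20.0 */
        return 0;
    }
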
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 6af42ba9885c..0d123e647652 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -59,12 +59,18 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
dpcd_dp_tun_data,
sizeof(dpcd_dp_tun_data));
+ if (status != DC_OK)
+ goto err;
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
dpcd_topology_data,
sizeof(dpcd_topology_data));
+ if (status != DC_OK)
+ goto err;
+
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
@@ -75,6 +81,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+err:
return status;
}
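
dpcd_get_tunneling_device_data now checks each DPCD read and skips the parsing on failure through a single err label, the usual kernel early-exit shape. A minimal standalone model of the pattern (stub reads, not real DPCD access):

    #include <stdio.h>

    enum status { STATUS_OK = 0, STATUS_FAIL = 1 };

    /* stub for a DPCD read that can fail */
    static enum status read_dpcd_stub(int fail) { return fail ? STATUS_FAIL : STATUS_OK; }

    static enum status get_tunneling_data(int fail_caps, int fail_topology)
    {
        enum status status;

        status = read_dpcd_stub(fail_caps);      /* tunneling capabilities */
        if (status != STATUS_OK)
            goto err;

        status = read_dpcd_stub(fail_topology);  /* router topology id */
        if (status != STATUS_OK)
            goto err;

        printf("parse tunneling caps\n");        /* reached only on success */
    err:
        return status;
    }

    int main(void)
    {
        enum status a = get_tunneling_data(0, 0);  /* succeeds, parses     */
        enum status b = get_tunneling_data(1, 0);  /* fails first read     */
        enum status c = get_tunneling_data(0, 1);  /* fails second read    */
        printf("%d %d %d\n", a, b, c);             /* 0 1 1 */
        return 0;
    }
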
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 27b881f947e8..754c895e1bfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -272,7 +272,7 @@ void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
- fsleep(wait_in_micro_secs);
+ usleep_range_state(wait_in_micro_secs, wait_in_micro_secs, TASK_UNINTERRUPTIBLE);
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -1107,9 +1107,13 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_DOWNSPREAD_CTRL) failed\n", __func__, __LINE__);
status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LANE_COUNT_SET) failed\n", __func__, __LINE__);
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
lt_settings->link_settings.use_link_rate_set == true) {
@@ -1125,12 +1129,19 @@ enum dc_status dpcd_set_link_settings(
supported_link_rates, sizeof(supported_link_rates));
}
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__);
+
status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_RATE_SET) failed\n", __func__, __LINE__);
} else {
rate = get_dpcd_link_rate(&lt_settings->link_settings);
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__);
}
if (rate) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index b5cf75975fff..ccf8096dde29 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -412,7 +412,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 3aa05a2be6c0..43a467f6ce7b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -157,31 +157,11 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms)
{
- struct dpcd_source_backlight_set dpcd_backlight_set;
- uint8_t backlight_control = isHDR ? 1 : 0;
-
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- // OLEDs have no PWM, they can only use AUX
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- backlight_control = 1;
-
- *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
- *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
-
-
- if (!link->dpcd_caps.panel_luminance_control) {
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *)(&dpcd_backlight_set),
- sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
- &backlight_control, 1) != DC_OK)
- return false;
- } else {
+ if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX) {
uint8_t backlight_enable = 0;
struct target_luminance_value *target_luminance = NULL;
@@ -205,6 +185,24 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
(uint8_t *)(target_luminance),
sizeof(struct target_luminance_value)) != DC_OK)
return false;
+ } else {
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
}
return true;
@@ -523,13 +521,13 @@ bool edp_set_backlight_level(const struct dc_link *link,
uint32_t frame_ramp)
{
struct dc *dc = link->ctx->dc;
-
DC_LOGGER_INIT(link->ctx->logger);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
if (dc_is_embedded_signal(link->connector_signal)) {
struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+ struct set_backlight_level_params backlight_level_param = { 0 };
if (link->panel_cntl)
link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
@@ -544,10 +542,12 @@ bool edp_set_backlight_level(const struct dc_link *link,
return false;
}
+ backlight_level_param.backlight_pwm_u16_16 = backlight_pwm_u16_16;
+ backlight_level_param.frame_ramp = frame_ramp;
+
dc->hwss.set_backlight_level(
pipe_ctx,
- backlight_pwm_u16_16,
- frame_ramp);
+ &backlight_level_param);
}
return true;
}
@@ -940,8 +940,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
struct replay_context replay_context = { 0 };
unsigned int lineTimeInNs = 0;
-
- union replay_enable_and_configuration replay_config;
+ union replay_enable_and_configuration replay_config = { 0 };
union dpcd_alpm_configuration alpm_config;
@@ -1168,9 +1167,6 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (link_res->hpo_dp_link_enc) {
- if (link->wa_flags.disable_assr_for_uhbr)
- return;
-
link_enc_index = link_res->hpo_dp_link_enc->inst;
use_hpo_dp_link_enc = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index b7a57f98553d..40757f20d73f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -202,6 +202,7 @@ struct dcn_optc_registers {
uint32_t OPTC_CLOCK_CONTROL;
uint32_t OPTC_WIDTH_CONTROL2;
uint32_t OTG_PSTATE_REGISTER;
+ uint32_t OTG_PIPE_UPDATE_STATUS;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -566,6 +567,12 @@ struct dcn_optc_registers {
type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\
type OPTC_DOUBLE_BUFFER_PENDING;\
+#define TG_REG_FIELD_LIST_DCN2_0(type) \
+ type OTG_FLIP_PENDING;\
+ type OTG_DC_REG_UPDATE_PENDING;\
+ type OTG_CURSOR_UPDATE_PENDING;\
+ type OTG_VUPDATE_KEEPOUT_STATUS;\
+
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
@@ -600,6 +607,7 @@ struct dcn_optc_registers {
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
+ TG_REG_FIELD_LIST_DCN2_0(uint8_t)
TG_REG_FIELD_LIST_DCN3_2(uint8_t)
TG_REG_FIELD_LIST_DCN3_5(uint8_t)
TG_REG_FIELD_LIST_DCN401(uint8_t)
@@ -607,6 +615,7 @@ struct dcn_optc_shift {
struct dcn_optc_mask {
TG_REG_FIELD_LIST(uint32_t)
+ TG_REG_FIELD_LIST_DCN2_0(uint32_t)
TG_REG_FIELD_LIST_DCN3_2(uint32_t)
TG_REG_FIELD_LIST_DCN3_5(uint32_t)
TG_REG_FIELD_LIST_DCN401(uint32_t)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
index 364034b19028..928e110b95fb 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
@@ -43,7 +43,8 @@
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SR(DWB_SOURCE_SELECT),\
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -53,6 +54,10 @@
SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index abcd03d78668..4c95c0958612 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -271,6 +271,48 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
optc1->opp_count = opp_cnt;
}
+/* OPTC status register that indicates an OPTC double buffer update is pending */
+bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t update_pending = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_DOUBLE_BUFFER_PENDING,
+ &update_pending);
+
+ return (update_pending == 1);
+}
+
+/* OTG status register that indicates OTG update is pending */
+bool optc3_get_otg_update_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t update_pending = 0;
+
+ REG_GET(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_UPDATE_PENDING,
+ &update_pending);
+
+ return (update_pending == 1);
+}
+
+/* OTG status register that indicates surface update is pending */
+bool optc3_get_pipe_update_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t flip_pending = 0;
+ uint32_t dc_update_pending = 0;
+
+ REG_GET_2(OTG_PIPE_UPDATE_STATUS,
+ OTG_FLIP_PENDING,
+ &flip_pending,
+ OTG_DC_REG_UPDATE_PENDING,
+ &dc_update_pending);
+
+ return (flip_pending == 1 || dc_update_pending == 1);
+}
+
/**
* optc3_set_timing_double_buffer() - DRR double buffering control
*
@@ -375,6 +417,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn30_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
index bda974d432ea..e2303f9eaf13 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
@@ -109,7 +109,8 @@
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SR(DWB_SOURCE_SELECT)
+ SR(DWB_SOURCE_SELECT),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define DCN30_VTOTAL_REGS_SF(mask_sh)
@@ -209,6 +210,7 @@
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -319,7 +321,11 @@
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn30_timing_generator_init(struct optc *optc1);
@@ -356,4 +362,7 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
+bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc);
+bool optc3_get_otg_update_pending(struct timing_generator *optc);
+bool optc3_get_pipe_update_pending(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index 1a22ae89fb55..d7a45ef2d01b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -169,6 +169,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn301_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 30b81a448ce2..fbbe86d00c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -99,7 +99,8 @@
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI(OTG_CRC_CNTL2, OTG, inst),\
SR(DWB_SOURCE_SELECT),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -254,7 +255,11 @@
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn31_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 99c098e76116..0ff72b97b465 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
@@ -98,7 +98,8 @@
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -248,7 +249,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn314_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 00094f0e8470..c217f653b3c8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,18 +297,6 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
-bool optc32_get_double_buffer_pending(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t update_pending = 0;
-
- REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
- OPTC_DOUBLE_BUFFER_PENDING,
- &update_pending);
-
- return (update_pending == 1);
-}
-
static struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -373,7 +361,9 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
- .get_double_buffer_pending = optc32_get_double_buffer_pending,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 665d7c52f67c..0b0964a9da74 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -177,7 +177,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
@@ -185,6 +189,5 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
-bool optc32_get_double_buffer_pending(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index d077e2392379..be749ab41dce 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -67,7 +67,11 @@
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\
SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\
SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\
- SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh)
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index a5d6a7dca554..783ca9acc762 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -430,6 +430,35 @@ static void optc401_program_global_sync(
REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
}
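+/* Program a VUPDATE keepout window from VUPDATE start (offset 0) to
+ * vready_offset + 10 lines, and enable or disable it per 'enable'.
+ */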
+static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, optc1->vready_offset + 10,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, enable);
+}
+
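+/* Poll OTG_MASTER_UPDATE_LOCK until UPDATE_LOCK_STATUS matches 'locked'
+ * (1 us poll interval, up to 150000 tries); returns false on timeout.
+ */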
+static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+ uint32_t lock_status = 0;
+
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, locked,
+ 1, 150000);
+
+ REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &lock_status);
+
+ if (lock_status != locked)
+ return false;
+
+ return true;
+}
+
static struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -493,7 +522,11 @@ static struct timing_generator_funcs dcn401_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
- .get_double_buffer_pending = optc32_get_double_buffer_pending,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .set_vupdate_keepout = optc401_set_vupdate_keepout,
+ .wait_update_lock_status = optc401_wait_update_lock_status,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index bb13a645802d..1be89571986f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -159,7 +159,11 @@
SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\
SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\
SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\
- SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh)
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 53a5f4cb648c..e698543ec937 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -623,7 +623,7 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
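+ /* hpd_source indexes link_enc_hpd_regs[]; reject out-of-range sources */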
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 91da5cf85b69..035c6cfdaee5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -668,7 +668,7 @@ static struct link_encoder *dce110_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 162856c523e4..480a50967385 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -629,7 +629,7 @@ static struct link_encoder *dce112_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 621825a51f46..c63c59623433 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -706,7 +706,7 @@ static struct link_encoder *dce120_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index a73d3c6ef425..3d5113f010bb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -723,7 +723,7 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 563c5eec83ff..05d6d41ef9d3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -751,7 +751,7 @@ static struct link_encoder *dcn10_link_encoder_create(
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc10)
+ if (!enc10 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index eea2b3b307cd..288189913e1e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -920,7 +920,7 @@ struct link_encoder *dcn20_link_encoder_create(
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index fc54483b9104..15180ad71513 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -797,7 +797,7 @@ static struct link_encoder *dcn201_link_encoder_create(
kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
struct dcn10_link_encoder *enc10;
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
enc10 = &enc20->enc10;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 347e6aaea582..14b28841657d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -1298,7 +1298,7 @@ static struct link_encoder *dcn21_link_encoder_create(
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc21)
+ if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 5040a4c6ed18..baa4e2647dad 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -927,7 +927,7 @@ static struct link_encoder *dcn30_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 7d04739c3ba1..d8a7c2cf05de 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -883,7 +883,7 @@ static struct link_encoder *dcn301_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn301_link_encoder_construct(enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 5791b5cc2875..40c20b04635a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -893,7 +893,7 @@ static struct link_encoder *dcn302_link_encoder_create(
{
struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 63f0f882c861..daf1b65fd088 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -839,7 +839,7 @@ static struct link_encoder *dcn303_link_encoder_create(
{
struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index ac8cb20e2e3b..36bb26182e11 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1093,7 +1093,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 169924d0a839..58a5fbcf22bf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1149,7 +1149,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 3f4b9dba4112..3acad708c31b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1091,7 +1091,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
@@ -1812,6 +1812,11 @@ static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
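+/* Power profile is 0 when P-state switching is supported in this state,
+ * 1 when it is not.
+ */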
+static int dcn315_get_power_profile(const struct dc_state *context)
+{
+ return !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1840,6 +1845,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
+ .get_power_profile = dcn315_get_power_profile,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 5fd52c5fcee4..ce56f5d162c0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -1085,7 +1085,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index a124ad9bd108..aaaa888d112d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1039,7 +1039,7 @@ static struct link_encoder *dcn32_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1990,6 +1990,10 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
return 0;
}
+ if (dc->caps.max_cab_allocation_bytes == 0)
+ return 0xffffffff;
+
/* add 2 lines for worst case alignment */
cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 7901792afb7b..86c6e5e8c42e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1054,7 +1054,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
/* HUBP */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 827a94f84f10..35acc13cb5a9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1035,7 +1035,7 @@ static struct link_encoder *dcn321_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 893a9d9ee870..795f2c71c70f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1074,7 +1074,7 @@ static struct link_encoder *dcn35_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 70abd32ce2ad..0b8dc2eff596 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1054,7 +1054,7 @@ static struct link_encoder *dcn35_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 9d56fbdcd06a..306b4117e219 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1032,7 +1032,7 @@ static struct link_encoder *dcn401_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1579,7 +1579,8 @@ static void dcn401_destroy_resource_pool(struct resource_pool **pool)
}
static struct dc_cap_funcs cap_funcs = {
- .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
+ .get_subvp_en = dcn32_subvp_in_use,
};
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -1688,6 +1689,45 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
}
}
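+/* Map the committed DRAM clock to a memclk DPM level by counting
+ * clock-table entries strictly below it.
+ */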
+static int dcn401_get_power_profile(const struct dc_state *context)
+{
+ int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000;
+ int dpm_level = 0;
+
+ for (int i = 0; i < context->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
+ if (context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz == 0 ||
+ uclk_mhz < context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
+ break;
+ if (uclk_mhz > context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
+ dpm_level++;
+ }
+
+ return dpm_level;
+}
+
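+/* MALL needs a power-of-two channel count: round num_chans down to a
+ * power of two, then clamp to the per-ASIC maximum.
+ */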
+static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+{
+ unsigned int num_available_chans = 1;
+
+ /* channels for MALL must be a power of 2 */
+ while (num_chans > 1) {
+ num_available_chans = (num_available_chans << 1);
+ num_chans = (num_chans >> 1);
+ }
+
+ /* cannot be odd */
+ num_available_chans &= ~1;
+
+ /* clamp to max available channels for MALL per ASIC */
+ if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
+ } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
+ }
+
+ return num_available_chans;
+}
+
static struct resource_funcs dcn401_res_pool_funcs = {
.destroy = dcn401_destroy_resource_pool,
.link_enc_create = dcn401_link_encoder_create,
@@ -1714,6 +1754,7 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.prepare_mcache_programming = dcn401_prepare_mcache_programming,
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_power_profile = dcn401_get_power_profile,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1795,14 +1836,12 @@ static bool dcn401_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- /* total size = mall per channel * num channels * 1024 * 1024 */
- dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
/* Calculate the available MALL space */
- dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
dc, dc->ctx->dc_bios->vram_info.num_chans) *
dc->caps.mall_size_per_mem_channel * 1024 * 1024;
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
@@ -1867,6 +1906,7 @@ static bool dcn401_resource_construct(
dc->config.prefer_easf = true;
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2132,6 +2172,7 @@ static bool dcn401_resource_construct(
/* SPL */
spl_init_easf_filter_coeffs();
spl_init_blur_scale_coeffs();
+ dc->caps.scl_caps.sharpener_support = true;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 514d1ce20df9..bdafa7496cea 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -536,8 +536,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
- SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 014e8a296f0c..f043c7e32e16 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -848,13 +848,13 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
* surfaces based on policy setting
*/
if (!spl_is_yuv420(spl_in->basic_in.format) &&
- (spl_in->debug.sharpen_policy == SHARPEN_YUV))
+ (spl_in->sharpen_policy == SHARPEN_YUV))
return enable_isharp;
else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
- (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
+ (spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
return enable_isharp;
else if (!spl_in->is_fullscreen &&
- spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL)
+ spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL)
return enable_isharp;
/*
@@ -885,6 +885,18 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.viewport.width > max_downscale_src_width)
return false;
+ /* Disable adaptive scaler and sharpener when integer scaling is enabled */
+ if (spl_in->scaling_quality.integer_scaling) {
+ spl_scratch->scl_data.taps.h_taps = 1;
+ spl_scratch->scl_data.taps.v_taps = 1;
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ *enable_isharp = false;
+ return true;
+ }
+
/* Check if we are using EASF or not */
skip_easf = enable_easf(spl_in, spl_scratch);
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 2a74ff5fdfdb..fcb5d389592b 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -5,10 +5,8 @@
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
+#include "spl_debug.h"
#include "spl_os_types.h" // swap
-#ifndef SPL_ASSERT
-#define SPL_ASSERT(_bool) ((void *)0)
-#endif
#include "spl_fixpt31_32.h" // fixed31_32 and related functions
#include "spl_custom_float.h" // custom float and related functions
@@ -510,7 +508,6 @@ struct spl_funcs {
struct spl_debug {
int visual_confirm_base_offset;
int visual_confirm_dpp_offset;
- enum sharpen_policy sharpen_policy;
enum scale_to_sharpness_policy scale_to_sharpness_policy;
};
@@ -532,6 +529,7 @@ struct spl_in {
int h_active;
int v_active;
int sdr_white_level_nits;
+ enum sharpen_policy sharpen_policy;
};
// end of SPL inputs
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
index 5696dafd0894..a6f6132df241 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
@@ -5,21 +5,26 @@
#ifndef SPL_DEBUG_H
#define SPL_DEBUG_H
-#ifdef SPL_ASSERT
-#undef SPL_ASSERT
-#endif
-#define SPL_ASSERT(b)
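+/* SPL_ASSERT_CRITICAL warns on failure and, on KGDB-enabled kernels,
+ * also breaks into the debugger.
+ */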
+#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#define SPL_ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ kgdb_breakpoint(); \
+ } \
+} while (0)
+#else
+#define SPL_ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ ; \
+ } \
+} while (0)
+#endif /* CONFIG_HAVE_KGDB || CONFIG_KGDB */
-#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0)
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+#define SPL_ASSERT(expr) SPL_ASSERT_CRITICAL(expr)
+#else
+#define SPL_ASSERT(expr) WARN_ON(!(expr))
+#endif /* CONFIG_DEBUG_KERNEL_DC */
-#ifdef SPL_DALMSG
-#undef SPL_DALMSG
-#endif
-#define SPL_DALMSG(b)
-
-#ifdef SPL_DAL_ASSERT_MSG
-#undef SPL_DAL_ASSERT_MSG
-#endif
-#define SPL_DAL_ASSERT_MSG(b, m)
+#define SPL_BREAK_TO_DEBUGGER() SPL_ASSERT(0)
#endif // SPL_DEBUG_H
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
index a95565df5487..5fd79d9c67e2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -29,7 +29,7 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
+ SPL_ASSERT(divisor);
result = spl_div64_u64_rem(dividend, divisor, remainder);
@@ -63,7 +63,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den
unsigned long long res_value = complete_integer_division_u64(
arg1_value, arg2_value, &remainder);
- ASSERT(res_value <= LONG_MAX);
+ SPL_ASSERT(res_value <= (unsigned long long)LONG_MAX);
/* determine fractional part */
{
@@ -85,7 +85,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den
{
unsigned long long summand = (remainder << 1) >= arg2_value;
- ASSERT(res_value <= LLONG_MAX - summand);
+ SPL_ASSERT(res_value <= (unsigned long long)LLONG_MAX - summand);
res_value += summand;
}
@@ -118,19 +118,19 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed
res.value = arg1_int * arg2_int;
- ASSERT(res.value <= (long long)LONG_MAX);
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg1_int * arg2_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg2_int * arg1_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -139,7 +139,7 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)spl_fixpt_half.value);
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -163,17 +163,17 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
res.value = arg_int * arg_int;
- ASSERT(res.value <= (long long)LONG_MAX);
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg_int * arg_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -182,7 +182,7 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)spl_fixpt_half.value);
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -196,7 +196,7 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- ASSERT(arg.value);
+ SPL_ASSERT(arg.value);
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
@@ -295,7 +295,7 @@ static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed3
n + 1);
/* TODO find correct res */
- ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
+ SPL_ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
do
res = spl_fixpt_add(
@@ -337,9 +337,9 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
spl_fixpt_ln2,
m));
- ASSERT(m != 0);
+ SPL_ASSERT(m != 0);
- ASSERT(spl_fixpt_lt(
+ SPL_ASSERT(spl_fixpt_lt(
spl_fixpt_abs(r),
spl_fixpt_one));
@@ -364,7 +364,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
struct spl_fixed31_32 error;
- ASSERT(arg.value > 0);
+ SPL_ASSERT(arg.value > 0);
/* TODO if arg is negative, return NaN */
/* TODO if arg is zero, return -INF */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
index 8a045e2f8699..ed2647f9a099 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -5,11 +5,8 @@
#ifndef __SPL_FIXED31_32_H__
#define __SPL_FIXED31_32_H__
-#include "os_types.h"
+#include "spl_debug.h"
#include "spl_os_types.h" // swap
-#ifndef ASSERT
-#define ASSERT(_bool) ((void *)0)
-#endif
#ifndef LLONG_MAX
#define LLONG_MAX 9223372036854775807ll
@@ -194,7 +191,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
*/
static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
{
- ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
arg.value = arg.value << shift;
@@ -231,7 +228,7 @@ static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, st
{
struct spl_fixed31_32 res;
- ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ SPL_ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
res.value = arg1.value + arg2.value;
@@ -256,7 +253,7 @@ static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, st
{
struct spl_fixed31_32 res;
- ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ SPL_ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
res.value = arg1.value - arg2.value;
@@ -448,7 +445,7 @@ static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
const long long summand = spl_fixpt_half.value;
- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+ SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
@@ -469,7 +466,7 @@ static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
const long long summand = spl_fixpt_one.value -
spl_fixpt_epsilon.value;
- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+ SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
@@ -504,7 +501,7 @@ static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg
bool negative = arg.value < 0;
if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
- ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ SPL_ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
return arg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
index 709706ed4f2c..2e6ba71960ac 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
@@ -6,6 +6,8 @@
#ifndef _SPL_OS_TYPES_H_
#define _SPL_OS_TYPES_H_
+#include "spl_debug.h"
+
#include <linux/slab.h>
#include <linux/kgdb.h>
#include <linux/kref.h>
@@ -18,7 +20,6 @@
* general debug capabilities
*
*/
-#define SPL_BREAK_TO_DEBUGGER() ASSERT(0)
static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index fe5b6f7a3eb1..ff27229cc3a4 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -570,6 +570,14 @@ struct dmub_notification {
};
};
+/* enum dmub_ips_mode - IPS mode identifier */
+enum dmub_ips_mode {
+ DMUB_IPS_MODE_IPS1_MAX = 0,
+ DMUB_IPS_MODE_IPS2,
+ DMUB_IPS_MODE_IPS1_RCG,
+ DMUB_IPS_MODE_IPS1_ONO2_ON
+};
+
/**
* DMUB firmware version helper macro - useful for checking the firmware
* version to know if a feature or functionality is supported or present.
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index ebcf68bfae2b..6edd3d34c7b5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -170,6 +170,11 @@
#pragma pack(push, 1)
#define ABM_NUM_OF_ACE_SEGMENTS 5
+/**
+ * Debug FW state offset
+ */
+#define DMUB_DEBUG_FW_STATE_OFFSET 0x300
+
union abm_flags {
struct {
/**
@@ -747,7 +752,8 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
- uint32_t reserved_bits : 28; /**< Reversed bits */
+ uint32_t allow_idle : 1; /**< 1 if driver is allowing idle */
+ uint32_t reserved_bits : 27; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -1051,11 +1057,106 @@ enum dmub_gpint_command {
DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119,
/**
+ * DESC: Set IPS residency measurement
+ * ARGS: 0 - Disable ips measurement
+ * 1 - Enable ips measurement
+ */
+ DMUB_GPINT__IPS_RESIDENCY = 121,
+
+ /**
* DESC: Enable measurements for various task duration
* ARGS: 0 - Disable measurement
* 1 - Enable measurement
*/
DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123,
+
+ /**
+ * DESC: Gets IPS residency in microseconds
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in microseconds - lower 32 bits
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124,
+
+ /**
+ * DESC: Gets IPS1 histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125,
+
+ /**
+ * DESC: Gets IPS2 histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126,
+
+ /**
+ * DESC: Gets IPS residency
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in milli-percent.
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127,
+
+ /**
+ * DESC: Gets IPS1_RCG histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128,
+
+ /**
+ * DESC: Gets IPS1_ONO2_ON histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129,
+
+ /**
+ * DESC: Gets IPS entry counter during residency measurement
+ * ARGS: 0 - Return IPS1 entry counts
+ * 1 - Return IPS2 entry counts
+ * 2 - Return IPS1_RCG entry counts
+ * 3 - Return IPS1_ONO2_ON entry counts
+ * RETURN: Entry counter for selected IPS mode
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130,
+
+ /**
+ * DESC: Gets IPS inactive residency in microseconds
+ * ARGS: 0 - Return IPS1_MAX residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total inactive residency in microseconds - lower 32 bits
+ */
+ DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131,
+
+ /**
+ * DESC: Gets IPS inactive residency in microseconds
+ * ARGS: 0 - Return IPS1_MAX residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total inactive residency in microseconds - upper 32 bits
+ */
+ DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132,
+
+ /**
+ * DESC: Gets IPS residency in microseconds
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in microseconds - upper 32 bits
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI = 133,
};
/**
@@ -1306,9 +1407,10 @@ enum dmub_out_cmd_type {
/* DMUB_CMD__DPIA command sub-types. */
enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
- DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
+ DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, // will be replaced by DPIA_SET_CONFIG_REQUEST
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3,
+ DMUB_CMD__DPIA_SET_CONFIG_REQUEST = 4,
};
/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
@@ -2097,7 +2199,7 @@ struct dmub_rb_cmd_dig1_dpia_control {
};
/**
- * SET_CONFIG Command Payload
+ * SET_CONFIG Command Payload (deprecated)
*/
struct set_config_cmd_payload {
uint8_t msg_type; /* set config message type */
@@ -2105,7 +2207,7 @@ struct set_config_cmd_payload {
};
/**
- * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. (deprecated)
*/
struct dmub_cmd_set_config_control_data {
struct set_config_cmd_payload cmd_pkt;
@@ -2114,6 +2216,17 @@ struct dmub_cmd_set_config_control_data {
};
/**
+ * SET_CONFIG Request Command Payload
+ */
+struct set_config_request_cmd_payload {
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned in case of error */
+ uint8_t msg_type; /* set config message type */
+ uint8_t reserved;
+ uint32_t msg_data; /* set config message data */
+};
+
+/**
* DMUB command structure for SET_CONFIG command.
*/
struct dmub_rb_cmd_set_config_access {
@@ -2122,6 +2235,14 @@ struct dmub_rb_cmd_set_config_access {
};
/**
+ * DMUB command structure for SET_CONFIG request command.
+ */
+struct dmub_rb_cmd_set_config_request {
+ struct dmub_cmd_header header; /* header */
+ struct set_config_request_cmd_payload payload; /* set config request payload */
+};
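
A minimal sketch of filling the new request; the dmub_cmd_header type/sub_type fields follow the usual DMUB header layout and are assumed here, and ring submission is omitted.

/* Sketch: populate a SET_CONFIG request (submission omitted). */
static void fill_set_config_request(struct dmub_rb_cmd_set_config_request *cmd,
				    uint8_t dpia_inst, uint8_t msg_type,
				    uint32_t msg_data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.type = DMUB_CMD__DPIA;		/* assumed header layout */
	cmd->header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_REQUEST;
	cmd->payload.instance = dpia_inst;
	cmd->payload.msg_type = msg_type;
	cmd->payload.msg_data = msg_data;
	/* payload.immed_status is written back by the FW on error */
}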
+
+/**
* Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
*/
struct dmub_cmd_mst_alloc_slots_control_data {
@@ -4316,9 +4437,37 @@ struct dmub_cmd_abm_set_backlight_data {
uint8_t panel_mask;
/**
+ * Backlight control type.
+ * Value 0 is PWM backlight control.
+ * Value 1 is VAUX backlight control.
+ * Value 2 is AMD DPCD AUX backlight control.
+ */
+ uint8_t backlight_control_type;
+
+ /**
* Explicit padding to 4 byte boundary.
*/
- uint8_t pad[2];
+ uint8_t pad[1];
+
+ /**
+ * Minimum luminance in nits.
+ */
+ uint32_t min_luminance;
+
+ /**
+ * Maximum luminance in nits.
+ */
+ uint32_t max_luminance;
+
+ /**
+ * Minimum backlight in pwm.
+ */
+ uint32_t min_backlight_pwm;
+
+ /**
+ * Maximum backlight in pwm.
+ */
+ uint32_t max_backlight_pwm;
};
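
One plausible use of the new fields, sketched under the assumption that brightness maps linearly between the luminance and PWM bounds (this patch only defines the fields):

/* Sketch: linear map from requested nits to the panel's PWM range. */
static uint32_t luminance_to_pwm(const struct dmub_cmd_abm_set_backlight_data *d,
				 uint32_t nits)
{
	uint32_t lum_span = d->max_luminance - d->min_luminance;
	uint32_t pwm_span = d->max_backlight_pwm - d->min_backlight_pwm;

	if (!lum_span || nits <= d->min_luminance)
		return d->min_backlight_pwm;
	if (nits >= d->max_luminance)
		return d->max_backlight_pwm;

	return d->min_backlight_pwm +
	       (uint32_t)((uint64_t)(nits - d->min_luminance) * pwm_span / lum_span);
}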
/**
@@ -5318,7 +5467,11 @@ union dmub_rb_cmd {
/**
* Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
*/
- struct dmub_rb_cmd_set_config_access set_config_access;
+ struct dmub_rb_cmd_set_config_access set_config_access; // (deprecated)
+ /**
+ * Definition of a DMUB_CMD__DPIA_SET_CONFIG_REQUEST command.
+ */
+ struct dmub_rb_cmd_set_config_request set_config_request;
/**
* Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
*/
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index aee5170f5fb2..de8f3cfed6c8 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -164,18 +164,19 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
-#define DP_SOURCE_SEQUENCE 0x30c
-#define DP_SOURCE_TABLE_REVISION 0x310
-#define DP_SOURCE_PAYLOAD_SIZE 0x311
-#define DP_SOURCE_SINK_CAP 0x317
-#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
-#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
-#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
-#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
-#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
+#define DP_SOURCE_SEQUENCE 0x30C
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
#define DP_SINK_PR_REPLAY_STATUS 0x378
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+#define DP_SINK_EMISSION_RATE 0x37E
/* Remove once drm_dp_helper.h is updated upstream */
#ifndef DP_TOTAL_LTTPR_CNT
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index a48d564d1660..4d68c1c6e210 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -61,11 +61,13 @@
#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
-#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
#define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__)
+#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
+#define DC_LOG_REGISTER_READ(...) pr_debug("[REGISTER_READ]: "__VA_ARGS__)
+#define DC_LOG_REGISTER_WRITE(...) pr_debug("[REGISTER_WRITE]: "__VA_ARGS__)
struct dc_log_buffer_ctx {
char *buf;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bbd259cea4f4..fc4268729017 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -48,6 +48,7 @@
#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500
+#define MICRO_HZ_TO_HZ(x) ((x) / 1000000)
struct core_freesync {
struct mod_freesync public;
@@ -132,9 +133,19 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- v_total = div64_u64(div64_u64(((unsigned long long)(
- frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total) + 500000, 1000000);
+ if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ /* When the target refresh rate is the minimum panel refresh rate,
+ * round down the vtotal value to avoid stretching vblank over
+ * panel's vtotal boundary.
+ */
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total), 1000000);
+ } else {
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total) + 500000, 1000000);
+ }
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
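
For reference, both branches above evaluate the same quantity, the number of lines in one frame:

	v_total = frame_duration_s * pix_clk_hz / h_total
	        = frame_duration_ns * (pix_clk_100hz / 10) / h_total / 10^6

The only difference is rounding: the existing branch adds 500000 before the final divide-by-10^6 to round to the nearest line, while the new minimum-refresh branch omits it so v_total is floored and vblank never stretches past the panel's vtotal limit.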
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3cd52e7a9c77..95838c7ab054 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 3f91926a50e9..7eefcb0f5070 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -28,6 +28,8 @@
#define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */
+struct amdgpu_ip_block;
+
/*
* Chip flags
@@ -337,6 +339,11 @@ enum DC_DEBUG_MASK {
* @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
+ /**
+ * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * eDP display from ACPI _DDC method.
+ */
+ DC_DISABLE_ACPI_EDID = 0x8000,
};
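
Since DC_DEBUG_MASK entries are individual bits, honouring the new flag is a plain bitwise test; the plumbing below (a dc_debug_mask value reaching the EDID path) is assumed for illustration.

/* Sketch: gate the ACPI _DDC EDID fetch on the new debug bit. */
static bool acpi_edid_allowed(u32 dc_debug_mask)
{
	return !(dc_debug_mask & DC_DISABLE_ACPI_EDID);
}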
enum amd_dpm_forced_level;
@@ -377,30 +384,30 @@ enum amd_dpm_forced_level;
*/
struct amd_ip_funcs {
char *name;
- int (*early_init)(void *handle);
- int (*late_init)(void *handle);
- int (*sw_init)(void *handle);
- int (*sw_fini)(void *handle);
- int (*early_fini)(void *handle);
- int (*hw_init)(void *handle);
- int (*hw_fini)(void *handle);
- void (*late_fini)(void *handle);
- int (*prepare_suspend)(void *handle);
- int (*suspend)(void *handle);
- int (*resume)(void *handle);
+ int (*early_init)(struct amdgpu_ip_block *ip_block);
+ int (*late_init)(struct amdgpu_ip_block *ip_block);
+ int (*sw_init)(struct amdgpu_ip_block *ip_block);
+ int (*sw_fini)(struct amdgpu_ip_block *ip_block);
+ int (*early_fini)(struct amdgpu_ip_block *ip_block);
+ int (*hw_init)(struct amdgpu_ip_block *ip_block);
+ int (*hw_fini)(struct amdgpu_ip_block *ip_block);
+ void (*late_fini)(struct amdgpu_ip_block *ip_block);
+ int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
+ int (*suspend)(struct amdgpu_ip_block *ip_block);
+ int (*resume)(struct amdgpu_ip_block *ip_block);
bool (*is_idle)(void *handle);
- int (*wait_for_idle)(void *handle);
- bool (*check_soft_reset)(void *handle);
- int (*pre_soft_reset)(void *handle);
- int (*soft_reset)(void *handle);
- int (*post_soft_reset)(void *handle);
+ int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
+ bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*post_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
- void (*dump_ip_state)(void *handle);
- void (*print_ip_state)(void *handle, struct drm_printer *p);
+ void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
+ void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
};
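
With this refactor each callback receives the IP block itself rather than an opaque handle, so implementations reach the device through ip_block->adev instead of casting. A minimal converted callback, for illustration only:

/* Sketch of a callback in the new style; no void * cast needed. */
static int example_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	dev_info(adev->dev, "init IP block %s\n",
		 ip_block->version->funcs->name);
	return 0;
}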
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 2c3ce243861a..380e44230bda 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
@@ -1232,6 +1232,29 @@
#define mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d
#define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0
+// Stand Alone Walker Registers
+#define VMC_TAP_PDE_REQUEST_SNOOP_OFFSET 8
+#define VMC_TAP_PTE_REQUEST_SNOOP_OFFSET 11
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0606
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0607
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0608
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0609
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x060a
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x060b
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_CNTL 0x0604
+#define mmVM_L2_SAW_CONTEXT0_CNTL_BASE_IDX 0
+#define CONTEXT0_CNTL_ENABLE_OFFSET 0
+#define CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET 1
+#define mmVM_L2_SAW_CONTEXTS_DISABLE 0x060c
+#define mmVM_L2_SAW_CONTEXTS_DISABLE_BASE_IDX 0
+#define mmVM_L2_SAW_CNTL4 0x0603
+#define mmVM_L2_SAW_CNTL4_BASE_IDX 0
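
The *_OFFSET defines above are bit positions, so composing a CONTEXT0 control value is shift-and-OR; the register write itself is omitted here.

/* Sketch: build a VM_L2_SAW_CONTEXT0_CNTL value from the bit offsets. */
static uint32_t saw_context0_cntl(uint32_t page_table_depth)
{
	return (1u << CONTEXT0_CNTL_ENABLE_OFFSET) |
	       (page_table_depth << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET);
}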
// addressBlock: mmhub_utcl2_atcl2dec
// base address: 0x69900
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 19a48d98830a..2fa71f68205e 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -336,7 +336,8 @@ enum pp_policy_soc_pstate {
#define MAX_CLKS 4
#define NUM_VCN 4
#define NUM_JPEG_ENG 32
-
+#define MAX_XCC 8
+#define NUM_XCP 8
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -350,6 +351,15 @@ struct pp_smu_wm_range_sets;
struct pp_smu_nv_clock_table;
struct dpm_clocks;
+struct amdgpu_xcp_metrics {
+ /* Utilization Instantaneous (%) */
+ u32 gfx_busy_inst[MAX_XCC];
+ u16 jpeg_busy[NUM_JPEG_ENG];
+ u16 vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ u64 gfx_busy_acc[MAX_XCC];
+};
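
The _inst fields are instantaneous percentages while the _acc fields accumulate, so a utilization average over a window comes from deltas of the accumulated counters; the sampling bookkeeping below is assumed.

/* Sketch: average GFX busy % of one XCP between two samples.
 * Needs linux/math64.h for div64_u64().
 */
static u32 xcp_avg_gfx_busy(const struct amdgpu_xcp_metrics *prev,
			    const struct amdgpu_xcp_metrics *cur,
			    u32 num_xcc, u64 delta_samples)
{
	u64 sum = 0;
	u32 i;

	for (i = 0; i < num_xcc && i < MAX_XCC; i++)
		sum += cur->gfx_busy_acc[i] - prev->gfx_busy_acc[i];

	return delta_samples ? (u32)div64_u64(sum, (u64)num_xcc * delta_samples) : 0;
}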
+
struct amd_pm_funcs {
/* export for dpm on ci and si */
int (*pre_set_power_state)(void *handle);
@@ -872,6 +882,97 @@ struct gpu_metrics_v1_5 {
uint16_t padding;
};
+struct gpu_metrics_v1_6 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* Energy (15.259 uJ (2^-16 J) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /* PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /* PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size(KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Current number of partitions */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
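
energy_accumulator counts 2^-16 J (~15.259 uJ) units, so a consumer converts with a multiply and a shift; a sketch of the arithmetic (overflow ignored):

/* uJ = counts * 10^6 / 2^16 = counts * 15625 / 1024 */
static u64 energy_acc_to_uj(u64 counts)
{
	return (counts * 15625) >> 10;
}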
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index d5d6ab484e5a..ea940773353c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -145,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
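
The conversion pattern used throughout these read-only handlers:

/*
 *	ret = pm_runtime_get_if_active(dev);
 *	if (ret <= 0)
 *		return ret ?: -EPERM;
 *
 * pm_runtime_get_if_active() takes a usage reference and returns 1 only
 * if the device is already runtime-active; it returns 0 (no reference
 * taken) if the device is suspended, or a negative errno. "ret ?: -EPERM"
 * therefore passes real errors through and reports a suspended GPU as
 * -EPERM instead of waking it. Reads no longer resume the device; the
 * write paths keep pm_runtime_resume_and_get() and still do.
 */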
@@ -185,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_power_state(adev, state);
@@ -273,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
@@ -336,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
@@ -374,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
@@ -412,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -485,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
if (ret)
@@ -544,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (size <= 0)
@@ -580,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
@@ -808,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
type,
@@ -865,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -888,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -929,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
@@ -960,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1029,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1042,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1102,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
if (ret)
return ret;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
@@ -1283,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
@@ -1317,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@ -1345,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
@@ -1379,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
@@ -1427,17 +1379,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1492,11 +1441,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
@@ -1520,16 +1467,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ r = pm_runtime_get_if_active(adev->dev);
+ if (r <= 0)
+ return r ?: -EPERM;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1639,15 +1583,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%llu %llu %i\n",
@@ -1770,11 +1711,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1782,7 +1721,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1807,14 +1745,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
@@ -1849,15 +1787,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1890,11 +1825,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1906,7 +1839,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -2008,11 +1940,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ r = pm_runtime_resume_and_get(ddev->dev);
+ if (r < 0)
return r;
- }
r = kstrtoint(buf, 10, &bias);
if (r)
@@ -2335,11 +2265,9 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
@@ -2772,15 +2700,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
@@ -2817,11 +2742,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ ret = pm_runtime_resume_and_get(adev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
@@ -2866,11 +2789,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -2907,15 +2828,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -2937,15 +2855,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -3001,15 +2916,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -3036,11 +2948,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -3076,15 +2986,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
@@ -3119,11 +3026,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
@@ -3248,11 +3153,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ r = pm_runtime_get_if_active(adev->dev);
+ if (r <= 0)
+ return r ?: -EPERM;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3262,7 +3165,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return size;
@@ -3339,11 +3241,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_power_limit(adev, value);
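
For reference, the encoding built a few lines above packs the cap into a single u32: the wattage (converted from the microwatt sysfs input) occupies the low 24 bits and the limit type the bits above them.

/* Sketch: the packing handed to amdgpu_dpm_set_power_limit(). */
static u32 encode_power_limit(u32 microwatts, u32 limit_type)
{
	return (microwatts / 1000000) | (limit_type << 24);
}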
@@ -3787,17 +3687,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev->dev);
pm_runtime_put_autosuspend(adev->dev);
return size;
@@ -3879,23 +3776,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_get_sync(adev->dev);
+ ret = pm_runtime_resume_and_get(adev->dev);
if (ret < 0)
- goto err_out0;
+ return ret;
ret = amdgpu_dpm_odn_edit_dpm_table(adev,
cmd_type,
parameter,
parameter_size);
if (ret)
- goto err_out1;
+ goto err_out;
if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
ret = amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
if (ret)
- goto err_out1;
+ goto err_out;
}
pm_runtime_mark_last_busy(adev->dev);
@@ -3903,9 +3800,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
return count;
-err_out1:
+err_out:
pm_runtime_mark_last_busy(adev->dev);
-err_out0:
pm_runtime_put_autosuspend(adev->dev);
return ret;
@@ -4758,11 +4654,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(dev->dev);
+ r = pm_runtime_resume_and_get(dev->dev);
+ if (r < 0)
return r;
- }
if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
@@ -4777,7 +4671,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index e8b6989a40f3..8908646ad620 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2954,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle)
return actual_temp;
}
-static int kv_dpm_early_init(void *handle)
+static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->powerplay.pp_funcs = &kv_dpm_funcs;
adev->powerplay.pp_handle = adev;
@@ -2965,10 +2965,10 @@ static int kv_dpm_early_init(void *handle)
return 0;
}
-static int kv_dpm_late_init(void *handle)
+static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block)
{
/* powerdown unused blocks for now */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->pm.dpm_enabled)
return 0;
@@ -2979,11 +2979,10 @@ static int kv_dpm_late_init(void *handle)
return 0;
}
-static int kv_dpm_sw_init(void *handle)
+static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
+ struct amdgpu_device *adev = ip_block->adev;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
@@ -3024,9 +3023,9 @@ dpm_failed:
return ret;
}
-static int kv_dpm_sw_fini(void *handle)
+static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
flush_work(&adev->pm.dpm.thermal.work);
@@ -3035,10 +3034,10 @@ static int kv_dpm_sw_fini(void *handle)
return 0;
}
-static int kv_dpm_hw_init(void *handle)
+static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_dpm)
return 0;
@@ -3053,9 +3052,9 @@ static int kv_dpm_hw_init(void *handle)
return ret;
}
-static int kv_dpm_hw_fini(void *handle)
+static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
@@ -3063,9 +3062,9 @@ static int kv_dpm_hw_fini(void *handle)
return 0;
}
-static int kv_dpm_suspend(void *handle)
+static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* disable dpm */
@@ -3076,10 +3075,10 @@ static int kv_dpm_suspend(void *handle)
return 0;
}
-static int kv_dpm_resume(void *handle)
+static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
@@ -3100,17 +3099,6 @@ static bool kv_dpm_is_idle(void *handle)
return true;
}
-static int kv_dpm_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-
-static int kv_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -3314,12 +3302,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.suspend = kv_dpm_suspend,
.resume = kv_dpm_resume,
.is_idle = kv_dpm_is_idle,
- .wait_for_idle = kv_dpm_wait_for_idle,
- .soft_reset = kv_dpm_soft_reset,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version kv_smu_ip_block = {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a1baa13ab2c2..ee23a0f897c5 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -4755,13 +4755,15 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
u32 dram_timing;
u32 dram_timing2;
u32 burst_time;
+ int ret;
arb_regs->mc_arb_rfsh_rate =
(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
- amdgpu_atombios_set_engine_dram_timings(adev,
- pl->sclk,
- pl->mclk);
+ ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk,
+ pl->mclk);
+ if (ret)
+ return ret;
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -7619,10 +7621,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_late_init(void *handle)
+static int si_dpm_late_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->pm.dpm_enabled)
return 0;
@@ -7716,10 +7718,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
return err;
}
-static int si_dpm_sw_init(void *handle)
+static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
@@ -7763,9 +7765,9 @@ dpm_failed:
return ret;
}
-static int si_dpm_sw_fini(void *handle)
+static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
flush_work(&adev->pm.dpm.thermal.work);
@@ -7774,11 +7776,11 @@ static int si_dpm_sw_fini(void *handle)
return 0;
}
-static int si_dpm_hw_init(void *handle)
+static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_dpm)
return 0;
@@ -7793,9 +7795,9 @@ static int si_dpm_hw_init(void *handle)
return ret;
}
-static int si_dpm_hw_fini(void *handle)
+static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
@@ -7803,9 +7805,9 @@ static int si_dpm_hw_fini(void *handle)
return 0;
}
-static int si_dpm_suspend(void *handle)
+static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* disable dpm */
@@ -7816,10 +7818,10 @@ static int si_dpm_suspend(void *handle)
return 0;
}
-static int si_dpm_resume(void *handle)
+static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
@@ -7841,17 +7843,12 @@ static bool si_dpm_is_idle(void *handle)
return true;
}
-static int si_dpm_wait_for_idle(void *handle)
+static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX */
return 0;
}
-static int si_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
static int si_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -7928,10 +7925,10 @@ static void si_dpm_print_power_state(void *handle,
amdgpu_dpm_print_ps_status(adev, rps);
}
-static int si_dpm_early_init(void *handle)
+static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->powerplay.pp_funcs = &si_dpm_funcs;
adev->powerplay.pp_handle = adev;
@@ -8047,11 +8044,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = {
.resume = si_dpm_resume,
.is_idle = si_dpm_is_idle,
.wait_for_idle = si_dpm_wait_for_idle,
- .soft_reset = si_dpm_soft_reset,
.set_clockgating_state = si_dpm_set_clockgating_state,
.set_powergating_state = si_dpm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version si_smu_ip_block =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a71c6117d7e5..26624a716fc6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -75,11 +75,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
hwmgr = NULL;
}
-static int pp_early_init(void *handle)
+static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = handle;
-
+ struct amdgpu_device *adev = ip_block->adev;
ret = amd_powerplay_create(adev);
if (ret != 0)
@@ -131,9 +130,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
orderly_poweroff(true);
}
-static int pp_sw_init(void *handle)
+static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
@@ -148,9 +147,9 @@ static int pp_sw_init(void *handle)
return ret;
}
-static int pp_sw_fini(void *handle)
+static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
hwmgr_sw_fini(hwmgr);
@@ -160,10 +159,10 @@ static int pp_sw_fini(void *handle)
return 0;
}
-static int pp_hw_init(void *handle)
+static int pp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret = 0;
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
ret = hwmgr_hw_init(hwmgr);
@@ -174,10 +173,9 @@ static int pp_hw_init(void *handle)
return ret;
}
-static int pp_hw_fini(void *handle)
+static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
@@ -217,9 +215,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
}
}
-static int pp_late_init(void *handle)
+static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (hwmgr && hwmgr->pm_en)
@@ -231,9 +229,9 @@ static int pp_late_init(void *handle)
return 0;
}
-static void pp_late_fini(void *handle)
+static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.smu_prv_buffer)
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
@@ -246,25 +244,15 @@ static bool pp_is_idle(void *handle)
return false;
}
-static int pp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int pp_sw_reset(void *handle)
-{
- return 0;
-}
-
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
-static int pp_suspend(void *handle)
+static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
@@ -272,10 +260,9 @@ static int pp_suspend(void *handle)
return hwmgr_suspend(hwmgr);
}
-static int pp_resume(void *handle)
+static int pp_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
return hwmgr_resume(hwmgr);
}
@@ -298,12 +285,8 @@ static const struct amd_ip_funcs pp_ip_funcs = {
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
- .wait_for_idle = pp_wait_for_idle,
- .soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index b56298d9da98..fe24219c3bf4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -28,7 +28,6 @@
#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
-#include "ppevvmath.h"
#define MEM_ID_MASK 0xff000000
#define MEM_ID_SHIFT 24
@@ -677,433 +676,6 @@ bool atomctrl_get_pp_assign_pin(
return bRet;
}
-int atomctrl_calculate_voltage_evv_on_sclk(
- struct pp_hwmgr *hwmgr,
- uint8_t voltage_type,
- uint32_t sclk,
- uint16_t virtual_voltage_Id,
- uint16_t *voltage,
- uint16_t dpm_level,
- bool debug)
-{
- ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
- struct amdgpu_device *adev = hwmgr->adev;
- EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
- EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
- EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
- EFUSE_INPUT_PARAMETER sInput_FuseValues;
- READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
-
- uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
- fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
- fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
- fInt fLkg_FT, repeat;
- fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
- fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
- fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
- fInt fSclk_margin, fSclk, fEVV_V;
- fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
- uint32_t ul_FT_Lkg_V0NORM;
- fInt fLn_MaxDivMin, fMin, fAverage, fRange;
- fInt fRoots[2];
- fInt fStepSize = GetScaledFraction(625, 100000);
-
- int result;
-
- getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
- smu_atom_get_data_table(hwmgr->adev,
- GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
- NULL, NULL, NULL);
-
- if (!getASICProfilingInfo)
- return -1;
-
- if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
- (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
- getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
- return -1;
-
- /*-----------------------------------------------------------
- *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
- *-----------------------------------------------------------
- */
- fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
-
- switch (dpm_level) {
- case 1:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
- break;
- case 2:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
- break;
- case 3:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
- break;
- case 4:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
- break;
- case 5:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
- break;
- case 6:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
- break;
- case 7:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
- break;
- default:
- pr_err("DPM Level not supported\n");
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
- }
-
- /*-------------------------
- * DECODING FUSE VALUES
- * ------------------------
- */
- /*Decode RO_Fused*/
- sRO_fuse = getASICProfilingInfo->sRoFuse;
-
- sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- /* Finally, the actual fuse value */
- ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
- fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
- fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
-
- sCACm_fuse = getASICProfilingInfo->sCACm;
-
- sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
-
- fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
-
- sCACb_fuse = getASICProfilingInfo->sCACb;
-
- sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
-
- fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
-
- sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
-
- sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
-
- fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
- fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
-
- sKv_m_fuse = getASICProfilingInfo->sKv_m;
-
- sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
- if (result)
- return result;
-
- ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
- fRange = fMultiply(fRange, ConvertToFraction(-1));
-
- fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
- fAverage, fRange, sKv_m_fuse.ucEfuseLength);
-
- sKv_b_fuse = getASICProfilingInfo->sKv_b;
-
- sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
-
- fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
- fAverage, fRange, sKv_b_fuse.ucEfuseLength);
-
- /* Decoding the Leakage - No special struct container */
- /*
- * usLkgEuseIndex=56
- * ucLkgEfuseBitLSB=6
- * ucLkgEfuseLength=10
- * ulLkgEncodeLn_MaxDivMin=69077
- * ulLkgEncodeMax=1000000
- * ulLkgEncodeMin=1000
- * ulEfuseLogisticAlpha=13
- */
-
- sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
- sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
- sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
- fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
-
- fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
- fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
- fLkg_FT = fFT_Lkg_V0NORM;
-
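
Every fuse in Part 1 above is fetched the same way; only the index/shift/length triple and the decode constants change. A hypothetical condensed helper makes the pattern explicit -- the struct and table names are the ones this file already uses, but the helper itself is a sketch, not driver code:

static int read_one_fuse(struct amdgpu_device *adev, uint16_t index,
			 uint8_t bit_lsb, uint8_t bit_length, uint32_t *raw)
{
	READ_EFUSE_VALUE_PARAMETER out = {0};
	int result;

	/* describe which bits of the efuse block to read */
	out.sEfuse.usEfuseIndex = index;
	out.sEfuse.ucBitShift = bit_lsb;
	out.sEfuse.ucBitLength = bit_length;

	/* run the AtomBIOS ReadEfuseValue command table */
	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&out, sizeof(out));
	if (result)
		return result;

	*raw = le32_to_cpu(out.ulEfuseValue);
	return 0;
}

The raw value then goes to fDecodeLinearFuse(), fDecodeLogisticFuse() or fDecodeLeakageID() with that fuse's min/average/range constants, exactly as in the surrounding code.
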
- /*-------------------------------------------
- * PART 2 - Grabbing all required values
- *-------------------------------------------
- */
- fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
- fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
- fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
- fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
- fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
- fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
- fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
- fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
-
- fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
- fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
- fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
-
- fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
-
- fMargin_FMAX_mean = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
- fMargin_Plat_mean = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
- fMargin_FMAX_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
- fMargin_Plat_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
-
- fMargin_DC_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
- fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
-
- fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
- fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
- fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
- fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
- fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
-
- fSclk = GetScaledFraction(sclk, 100);
-
- fV_max = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
- fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
- fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
- fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
- fV_FT = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
- fV_min = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
-
- /*-----------------------
- * PART 3
- *-----------------------
- */
-
- fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
- fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
- fC_Term = fAdd(fMargin_RO_c,
- fAdd(fMultiply(fSM_A0, fLkg_FT),
- fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
- fAdd(fMultiply(fSM_A3, fSclk),
- fSubtract(fSM_A7, fRO_fused)))));
-
- fVDDC_base = fSubtract(fRO_fused,
- fSubtract(fMargin_RO_c,
- fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
- fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
-
- repeat = fSubtract(fVDDC_base,
- fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
-
- fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
- fGetSquare(repeat)),
- fAdd(fMultiply(fMargin_RO_b, repeat),
- fMargin_RO_c));
-
- fDC_SCLK = fSubtract(fRO_fused,
- fSubtract(fRO_DC_margin,
- fSubtract(fSM_A3,
- fMultiply(fSM_A2, repeat))));
- fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
-
- fSigma_DC = fSubtract(fSclk, fDC_SCLK);
-
- fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
- fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
- fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
- fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
-
- fSquared_Sigma_DC = fGetSquare(fSigma_DC);
- fSquared_Sigma_CR = fGetSquare(fSigma_CR);
- fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
-
- fSclk_margin = fAdd(fMicro_FMAX,
- fAdd(fMicro_CR,
- fAdd(fMargin_fixed,
- fSqrt(fAdd(fSquared_Sigma_FMAX,
- fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
- /*
- fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
- fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
- fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
- */
-
- fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
- fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
- fC_Term = fAdd(fRO_DC_margin,
- fAdd(fMultiply(fSM_A0, fLkg_FT),
- fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
- fAdd(fSclk, fSclk_margin)),
- fAdd(fMultiply(fSM_A3,
- fAdd(fSclk, fSclk_margin)),
- fSubtract(fSM_A7, fRO_fused)))));
-
- SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
-
- if (GreaterThan(fRoots[0], fRoots[1]))
- fEVV_V = fRoots[1];
- else
- fEVV_V = fRoots[0];
-
- if (GreaterThan(fV_min, fEVV_V))
- fEVV_V = fV_min;
- else if (GreaterThan(fEVV_V, fV_max))
- fEVV_V = fSubtract(fV_max, fStepSize);
-
- fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
-
- /*-----------------
- * PART 4
- *-----------------
- */
-
- fV_x = fV_min;
-
- while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
- fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
- fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
- fGetSquare(fV_x)), fDerateTDP);
-
- fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
- fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
- fT_prod), fKv_b_fused), fV_x)), fV_x)));
- fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
- fKt_Beta_fused, fT_prod)));
- fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
- fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
- fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
- fKt_Beta_fused, fT_FT)));
-
- fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
-
- fTDP_Current = fDivide(fTDP_Power, fV_x);
-
- fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
- ConvertToFraction(10)));
-
- fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
-
- if (GreaterThan(fV_max, fV_NL) &&
- (GreaterThan(fV_NL, fEVV_V) ||
- Equal(fV_NL, fEVV_V))) {
- fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
-
- *voltage = (uint16_t)fV_NL.partial.real;
- break;
- } else
- fV_x = fAdd(fV_x, fStepSize);
- }
-
- return result;
-}
-
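
The deleted routine solved the fused ring-oscillator model for voltage (Part 3) and then swept candidate voltages against a TDP/leakage model (Part 4). Its root-selection and clamping tail, modelled in plain double under illustrative names -- note that fRoundUpByStepSize() with a zero error term truncates the quotient and then adds one whole step, so the result always lands strictly above its input:

#include <stdint.h>

static double round_up_by_step(double v, double step)
{
	/* truncate to a step boundary, then bump up one full step */
	return (double)(int64_t)(v / step) * step + step;
}

static double pick_evv_voltage(double root0, double root1,
			       double v_min, double v_max, double step)
{
	double v = (root0 > root1) ? root1 : root0;	/* smaller root wins */

	if (v < v_min)
		v = v_min;
	else if (v > v_max)
		v = v_max - step;

	return round_up_by_step(v, step);
}
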
/**
* atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table.
* @hwmgr: input: pointer to hwManager
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
index 1f987e846628..22b0ac12df97 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
@@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
pp_atomctrl_clock_dividers_kong *dividers);
extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
uint16_t end_index, uint32_t *efuse);
-extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers);
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
deleted file mode 100644
index 409aeec6baa9..000000000000
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-
-enum ppevvmath_constants {
- /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
- SHIFT_AMOUNT = 16,
-
- /* Change this value to change the number of decimal places in the final output - 5 is a good default */
- PRECISION = 5,
-
- SHIFTED_2 = (2 << SHIFT_AMOUNT),
-
- /* 32767 - Might change in the future */
- MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,
-};
-
-/* -------------------------------------------------------------------------------
- * NEW TYPE - fInt
- * -------------------------------------------------------------------------------
- * A variable of type fInt can be accessed in 3 ways using the dot (.) operator
- * fInt A;
- * A.full => The full number as it is. Generally not easy to read
- * A.partial.real => Only the integer portion
- * A.partial.decimal => Only the fractional portion
- */
-typedef union _fInt {
- int full;
- struct _partial {
- unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/
- int real: 32 - SHIFT_AMOUNT;
- } partial;
-} fInt;
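
A minimal user-space sketch of the Q16.16 access pattern described above; bit-field layout is implementation-defined, so this assumes the usual little-endian ABI the driver ran on:

#include <stdint.h>
#include <stdio.h>

#define SHIFT_AMOUNT 16

typedef union {
	int full;
	struct {
		unsigned int decimal : SHIFT_AMOUNT;	/* low 16 bits: fraction */
		int real : 32 - SHIFT_AMOUNT;		/* high 16 bits: integer */
	} partial;
} fInt;

int main(void)
{
	fInt a;

	a.full = (3 << SHIFT_AMOUNT) | (1 << (SHIFT_AMOUNT - 1));	/* 3.5 */

	printf("full=0x%08x real=%d decimal=0x%04x (%f)\n",
	       a.full, a.partial.real, a.partial.decimal, a.full / 65536.0);
	return 0;
}
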
-
-/* -------------------------------------------------------------------------------
- * Function Declarations
- * -------------------------------------------------------------------------------
- */
-static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
-static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert a uint32_t to a FINT */
-static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT scaled down by a factor (returns X/factor) */
-static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
-
-static fInt fNegate(fInt); /* Returns -1 * input fInt value */
-static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
-static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
-static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
-static fInt fDivide (fInt A, fInt B); /* Returns A/B */
-static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
-static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
-
-static int uAbs(int); /* Returns the Absolute value of the Int */
-static int uPow(int base, int exponent); /* Returns base^exponent as an INT */
-
-static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
-static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
-
-static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
-static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
-
-/* Fuse decoding functions
- * -------------------------------------------------------------------------------------
- */
-static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
-
-/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
- * -------------------------------------------------------------------------------------
- * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
- */
-static fInt Divide (int, int); /* Divide two INTs and return result as FINT */
-static fInt fNegate(fInt);
-
-static int uGetScaledDecimal (fInt); /* Internal function */
-static int GetReal (fInt A); /* Internal function */
-
-/* -------------------------------------------------------------------------------------
- * TROUBLESHOOTING INFORMATION
- * -------------------------------------------------------------------------------------
- * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767)
- * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767)
- * 3) fMultiply - OutputOutOfRangeException:
- * 4) fGetSquare - OutputOutOfRangeException:
- * 5) fDivide - DivideByZeroException
- * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number
- */
-
-/* -------------------------------------------------------------------------------------
- * START OF CODE
- * -------------------------------------------------------------------------------------
- */
-static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
-{
- uint32_t i;
- bool bNegated = false;
-
- fInt fPositiveOne = ConvertToFraction(1);
- fInt fZERO = ConvertToFraction(0);
-
- fInt lower_bound = Divide(78, 10000);
- fInt solution = fPositiveOne; /*Starting off with baseline of 1 */
- fInt error_term;
-
- static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
- static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
-
- if (GreaterThan(fZERO, exponent)) {
- exponent = fNegate(exponent);
- bNegated = true;
- }
-
- while (GreaterThan(exponent, lower_bound)) {
- for (i = 0; i < 11; i++) {
- if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) {
- exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000));
- solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000));
- }
- }
- }
-
- error_term = fAdd(fPositiveOne, exponent);
-
- solution = fMultiply(solution, error_term);
-
- if (bNegated)
- solution = fDivide(fPositiveOne, solution);
-
- return solution;
-}
-
-static fInt fNaturalLog(fInt value)
-{
- uint32_t i;
- fInt upper_bound = Divide(8, 1000);
- fInt fNegativeOne = ConvertToFraction(-1);
- fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */
- fInt error_term;
-
- static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
- static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
-
- while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) {
- for (i = 0; i < 10; i++) {
- if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) {
- value = fDivide(value, GetScaledFraction(k_array[i], 10000));
- solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000));
- }
- }
- }
-
- error_term = fAdd(fNegativeOne, value);
-
- return fAdd(solution, error_term);
-}
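
The two functions above are table-driven rather than polynomial: fExponential() peels off precomputed natural-log constants (k_array, scaled by 10^4) while multiplying in their known exponentials (expk_array), then handles the small residual r with the first-order term 1 + r; fNaturalLog() walks the same tables in the opposite direction. The same idea as a runnable plain-double sketch:

#include <math.h>
#include <stdio.h>

static double exp_by_tables(double x)
{
	/* k[i] = ln(expk[i]); these are k_array and expk_array / 10^4 */
	static const double k[] = { 5.5452, 2.7726, 1.3863, 0.6931, 0.4055,
				    0.2231, 0.1178, 0.0606, 0.0308, 0.0155,
				    0.0078 };
	static const double expk[] = { 256.0, 16.0, 4.0, 2.0, 1.5, 1.25,
				       1.125, 1.0625, 1.03125, 1.015625,
				       1.0078125 };
	double r = x, y = 1.0;
	int negated = 0;
	size_t i;

	if (r < 0) {
		r = -r;
		negated = 1;
	}

	while (r > 0.0078) {		/* the fixed-point lower_bound */
		for (i = 0; i < sizeof(k) / sizeof(k[0]); i++) {
			if (r > k[i]) {
				r -= k[i];	/* subtract ln(expk[i]) ... */
				y *= expk[i];	/* ... and multiply it back in */
			}
		}
	}

	y *= 1.0 + r;			/* first-order residual: e^r ~= 1 + r */
	return negated ? 1.0 / y : y;
}

int main(void)
{
	printf("%f vs %f\n", exp_by_tables(2.0), exp(2.0));	/* ~7.389 */
	return 0;
}
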
-
-static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
-{
- fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fInt f_decoded_value;
-
- f_decoded_value = fDivide(f_fuse_value, f_bit_max_value);
- f_decoded_value = fMultiply(f_decoded_value, f_range);
- f_decoded_value = fAdd(f_decoded_value, f_min);
-
- return f_decoded_value;
-}
-
-
-static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
-{
- fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fInt f_CONSTANT_NEG13 = ConvertToFraction(-13);
- fInt f_CONSTANT1 = ConvertToFraction(1);
-
- fInt f_decoded_value;
-
- f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1);
- f_decoded_value = fNaturalLog(f_decoded_value);
- f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13));
- f_decoded_value = fAdd(f_decoded_value, f_average);
-
- return f_decoded_value;
-}
-
-static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
-{
- fInt fLeakage;
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse));
- fLeakage = fDivide(fLeakage, f_bit_max_value);
- fLeakage = fExponential(fLeakage);
- fLeakage = fMultiply(fLeakage, f_min);
-
- return fLeakage;
-}
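
The three decoders above reduce to closed forms: with an n-bit fuse f in [0, 2^n - 1], the linear decode is min + range * f / (2^n - 1); the logistic decode is average + (range / -13) * ln((2^n - 1)/f - 1), the alpha fixed at 13 matching ulEfuseLogisticAlpha; the leakage ID is min * exp(ln(max/min) * f / (2^n - 1)). A plain-double sketch with illustrative inputs only:

#include <math.h>
#include <stdio.h>

static double decode_linear(unsigned int fuse, double min, double range,
			    unsigned int bits)
{
	double max = (double)((1u << bits) - 1);

	return min + range * (double)fuse / max;
}

static double decode_logistic(unsigned int fuse, double average,
			      double range, unsigned int bits)
{
	double max = (double)((1u << bits) - 1);

	/* fuse must be nonzero and below max for the log to stay finite */
	return average + (range / -13.0) * log(max / (double)fuse - 1.0);
}

static double decode_leakage(unsigned int fuse, double ln_max_div_min,
			     double min, unsigned int bits)
{
	double max = (double)((1u << bits) - 1);

	return min * exp(ln_max_div_min * (double)fuse / max);
}

int main(void)
{
	/* illustrative numbers, not real fuse data; 6.9077 mirrors the
	 * ulLkgEncodeLn_MaxDivMin=69077 example in the comment above */
	printf("linear:   %f\n", decode_linear(512, 1.0, 2.0, 10));
	printf("logistic: %f\n", decode_logistic(512, 1.0, 2.0, 10));
	printf("leakage:  %f\n", decode_leakage(512, 6.9077, 0.1, 10));
	return 0;
}
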
-
-static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
-{
- fInt temp;
-
- if (X <= MAX)
- temp.full = (X << SHIFT_AMOUNT);
- else
- temp.full = 0;
-
- return temp;
-}
-
-static fInt fNegate(fInt X)
-{
- fInt CONSTANT_NEGONE = ConvertToFraction(-1);
- return fMultiply(X, CONSTANT_NEGONE);
-}
-
-static fInt Convert_ULONG_ToFraction(uint32_t X)
-{
- fInt temp;
-
- if (X <= MAX)
- temp.full = (X << SHIFT_AMOUNT);
- else
- temp.full = 0;
-
- return temp;
-}
-
-static fInt GetScaledFraction(int X, int factor)
-{
- int times_shifted, factor_shifted;
- bool bNEGATED;
- fInt fValue;
-
- times_shifted = 0;
- factor_shifted = 0;
- bNEGATED = false;
-
- if (X < 0) {
- X = -1*X;
- bNEGATED = true;
- }
-
- if (factor < 0) {
- factor = -1*factor;
- bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */
- }
-
- if ((X > MAX) || factor > MAX) {
- if ((X/factor) <= MAX) {
- while (X > MAX) {
- X = X >> 1;
- times_shifted++;
- }
-
- while (factor > MAX) {
- factor = factor >> 1;
- factor_shifted++;
- }
- } else {
- fValue.full = 0;
- return fValue;
- }
- }
-
- if (factor == 1)
- return ConvertToFraction(X);
-
- fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor));
-
- fValue.full = fValue.full << times_shifted;
- fValue.full = fValue.full >> factor_shifted;
-
- return fValue;
-}
-
-/* Addition using two fInts */
-static fInt fAdd (fInt X, fInt Y)
-{
- fInt Sum;
-
- Sum.full = X.full + Y.full;
-
- return Sum;
-}
-
-/* Subtraction using two fInts */
-static fInt fSubtract (fInt X, fInt Y)
-{
- fInt Difference;
-
- Difference.full = X.full - Y.full;
-
- return Difference;
-}
-
-static bool Equal(fInt A, fInt B)
-{
- if (A.full == B.full)
- return true;
- else
- return false;
-}
-
-static bool GreaterThan(fInt A, fInt B)
-{
- if (A.full > B.full)
- return true;
- else
- return false;
-}
-
-static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
-{
- fInt Product;
- int64_t tempProduct;
-
- /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/
- /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION
- bool X_LessThanOne, Y_LessThanOne;
-
- X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0);
- Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0);
-
- if (X_LessThanOne && Y_LessThanOne) {
- Product.full = X.full * Y.full;
- return Product;
- }*/
-
- tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */
- tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */
- Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */
-
- return Product;
-}
-
-static fInt fDivide (fInt X, fInt Y)
-{
- fInt fZERO, fQuotient;
- int64_t longlongX, longlongY;
-
- fZERO = ConvertToFraction(0);
-
- if (Equal(Y, fZERO))
- return fZERO;
-
- longlongX = (int64_t)X.full;
- longlongY = (int64_t)Y.full;
-
- longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */
-
- longlongX = div64_s64(longlongX, longlongY); /* Q(32,32) divided by Q(16,16) = Q(16,16), back to the original format; div64_s64() returns the quotient rather than dividing in place */
-
- fQuotient.full = (int)longlongX;
- return fQuotient;
-}
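
Stand-alone equivalents of the Q16.16 multiply and divide above, runnable in user space; plain 64-bit division stands in for div64_s64(), and both truncate toward zero:

#include <stdint.h>
#include <stdio.h>

static int32_t q16_mul(int32_t a, int32_t b)
{
	/* Q16.16 * Q16.16 = Q32.32 in 64 bits; shift back down to Q16.16 */
	return (int32_t)(((int64_t)a * (int64_t)b) >> 16);
}

static int32_t q16_div(int32_t a, int32_t b)
{
	if (b == 0)
		return 0;	/* the driver returned 0 instead of trapping */

	/* widen the dividend to Q32.32 so the quotient lands in Q16.16 */
	return (int32_t)(((int64_t)a << 16) / b);
}

int main(void)
{
	int32_t three_half = 3 * 65536 + 32768;	/* 3.5 in Q16.16 */
	int32_t two = 2 * 65536;

	printf("3.5 * 2 = %f\n", q16_mul(three_half, two) / 65536.0);
	printf("3.5 / 2 = %f\n", q16_div(three_half, two) / 65536.0);
	return 0;
}
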
-
-static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
-{
- fInt fullNumber, scaledDecimal, scaledReal;
-
- scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */
-
- scaledDecimal.full = uGetScaledDecimal(A);
-
- fullNumber = fAdd(scaledDecimal, scaledReal);
-
- return fullNumber.full;
-}
-
-static fInt fGetSquare(fInt A)
-{
- return fMultiply(A, A);
-}
-
-/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
-static fInt fSqrt(fInt num)
-{
- fInt F_divide_Fprime, Fprime;
- fInt test;
- fInt twoShifted;
- int seed, counter, error;
- fInt x_new, x_old, C, y;
-
- fInt fZERO = ConvertToFraction(0);
-
- /* (0 > num) is the same as (num < 0), i.e., num is negative */
-
- if (GreaterThan(fZERO, num) || Equal(fZERO, num))
- return fZERO;
-
- C = num;
-
- if (num.partial.real > 3000)
- seed = 60;
- else if (num.partial.real > 1000)
- seed = 30;
- else if (num.partial.real > 100)
- seed = 10;
- else
- seed = 2;
-
- counter = 0;
-
- if (Equal(num, fZERO)) /*Square Root of Zero is zero */
- return fZERO;
-
- twoShifted = ConvertToFraction(2);
- x_new = ConvertToFraction(seed);
-
- do {
- counter++;
-
- x_old.full = x_new.full;
-
- test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */
- y = fSubtract(test, C); /*y = f(x) = x^2 - C; */
-
- Fprime = fMultiply(twoShifted, x_old);
- F_divide_Fprime = fDivide(y, Fprime);
-
- x_new = fSubtract(x_old, F_divide_Fprime);
-
- error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old);
-
- if (counter > 20) /* 20 is already way too many iterations; if we don't have an answer by then, we never will */
- return x_new;
-
- } while (uAbs(error) > 0);
-
- return x_new;
-}
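
fSqrt() above is Newton-Raphson on f(x) = x^2 - C with a magnitude-based seed and a hard cap of 20 iterations; stripped of the fixed-point bookkeeping, the loop is just:

/* x_{n+1} = x_n - (x_n^2 - C) / (2 * x_n); stop when the iterate
 * settles or after 20 rounds, mirroring the routine above. */
static double newton_sqrt(double c, double seed)
{
	double x = seed;
	int i;

	for (i = 0; i < 20; i++) {
		double next = x - (x * x - c) / (2.0 * x);

		if (next == x)
			break;
		x = next;
	}
	return x;
}
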
-
-static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
-{
- fInt *pRoots = &Roots[0];
- fInt temp, root_first, root_second;
- fInt f_CONSTANT10, f_CONSTANT100;
-
- f_CONSTANT100 = ConvertToFraction(100);
- f_CONSTANT10 = ConvertToFraction(10);
-
- while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) {
- A = fDivide(A, f_CONSTANT10);
- B = fDivide(B, f_CONSTANT10);
- C = fDivide(C, f_CONSTANT10);
- }
-
- temp = fMultiply(ConvertToFraction(4), A); /* temp = 4*A */
- temp = fMultiply(temp, C); /* temp = 4*A*C */
- temp = fSubtract(fGetSquare(B), temp); /* temp = b^2 - 4*A*C */
- temp = fSqrt(temp); /* temp = Sqrt(b^2 - 4*A*C) */
-
- root_first = fSubtract(fNegate(B), temp); /* -b - Sqrt(b^2 - 4AC) */
- root_second = fAdd(fNegate(B), temp); /* -b + Sqrt(b^2 - 4AC) */
-
- root_first = fDivide(root_first, ConvertToFraction(2)); /* [-b +- Sqrt(b^2 - 4AC)]/[2] */
- root_first = fDivide(root_first, A); /* [-b +- Sqrt(b^2 - 4AC)]/[2*A] */
-
- root_second = fDivide(root_second, ConvertToFraction(2)); /* [-b +- Sqrt(b^2 - 4AC)]/[2] */
- root_second = fDivide(root_second, A); /* [-b +- Sqrt(b^2 - 4AC)]/[2*A] */
-
- *(pRoots + 0) = root_first;
- *(pRoots + 1) = root_second;
-}
-
-/* -----------------------------------------------------------------------------
- * SUPPORT FUNCTIONS
- * -----------------------------------------------------------------------------
- */
-
-/* Conversion Functions */
-static int GetReal (fInt A)
-{
- return (A.full >> SHIFT_AMOUNT);
-}
-
-static fInt Divide (int X, int Y)
-{
- fInt A, B, Quotient;
-
- A.full = X << SHIFT_AMOUNT;
- B.full = Y << SHIFT_AMOUNT;
-
- Quotient = fDivide(A, B);
-
- return Quotient;
-}
-
-static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
-{
- int dec[PRECISION];
- int i, scaledDecimal = 0, tmp = A.partial.decimal;
-
- for (i = 0; i < PRECISION; i++) {
- dec[i] = tmp / (1 << SHIFT_AMOUNT);
- tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]);
- tmp *= 10;
- scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i);
- }
-
- return scaledDecimal;
-}
-
-static int uPow(int base, int power)
-{
- if (power == 0)
- return 1;
- else
- return (base)*uPow(base, power - 1);
-}
-
-static int uAbs(int X)
-{
- if (X < 0)
- return (X * -1);
- else
- return X;
-}
-
-static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
-{
- fInt solution;
-
- solution = fDivide(A, fStepSize);
- solution.partial.decimal = 0; /* all fractional digits change to 0 */
-
- if (error_term)
- solution.partial.real += 1; /*Error term of 1 added */
-
- solution = fMultiply(solution, fStepSize);
- solution = fAdd(solution, fStepSize);
-
- return solution;
-}
-
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
index 79c817752a33..2b446f8866ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
@@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
return table_address;
}
-#if 0
-static void dump_pptable(PPTable_t *pptable)
-{
- int i;
-
- pr_info("Version = 0x%08x\n", pptable->Version);
-
- pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0);
- pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau);
- pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1);
- pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau);
- pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2);
- pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau);
- pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3);
- pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau);
- pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc);
- pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau);
- pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
- pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
- pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
- pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
-
- pr_info("TedgeLimit = %d\n", pptable->TedgeLimit);
- pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit);
- pr_info("ThbmLimit = %d\n", pptable->ThbmLimit);
- pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
- pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
- pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit);
- pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit);
- pr_info("TplxLimit = %d\n", pptable->TplxLimit);
- pr_info("FitLimit = %d\n", pptable->FitLimit);
-
- pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
- pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
-
- pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage);
- pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits);
- pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit);
-
- pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc);
- pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
-
- pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid);
- pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid);
- pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
- pr_info("Padding234 = 0x%02x\n", pptable->Padding234);
-
- pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
- pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
- pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
- pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
-
- pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
- pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
-
- pr_info("[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c);
-
- pr_info("[PPCLK_VCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK].padding,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c);
-
- pr_info("[PPCLK_DCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c);
-
- pr_info("[PPCLK_ECLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_ECLK].padding,
- pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c);
-
- pr_info("[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c);
-
- pr_info("[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c);
-
- pr_info("[PPCLK_DCEFCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c);
-
- pr_info("[PPCLK_DISPCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DISPCLK].padding,
- pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c);
-
- pr_info("[PPCLK_PIXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_PIXCLK].padding,
- pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c);
-
- pr_info("[PPCLK_PHYCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_PHYCLK].padding,
- pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c);
-
- pr_info("[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c);
-
-
- pr_info("FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
-
- pr_info("FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
-
- pr_info("FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
-
- pr_info("FreqTableEclk\n");
- for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]);
-
- pr_info("FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
-
- pr_info("FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
-
- pr_info("FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
-
- pr_info("FreqTableDcefclk\n");
- for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]);
-
- pr_info("FreqTableDispclk\n");
- for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]);
-
- pr_info("FreqTablePixclk\n");
- for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]);
-
- pr_info("FreqTablePhyclk\n");
- for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]);
-
- pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]);
- pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]);
- pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]);
- pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]);
- pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]);
- pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks);
-
- pr_info("Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
-
- pr_info("Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
-
- pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
- pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq);
- pr_info("Padding789 = 0x%x\n", pptable->Padding789);
- pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n",
- pptable->CksVoltageOffset.a,
- pptable->CksVoltageOffset.b,
- pptable->CksVoltageOffset.c);
- pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]);
- pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]);
- pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]);
- pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]);
- pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
- pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- pr_info("Padding456 = 0x%x\n", pptable->Padding456);
-
- pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv);
- pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]);
- pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]);
- pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]);
-
- pr_info("PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]);
-
- pr_info("PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]);
-
- pr_info("LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]);
-
- pr_info("EnableTdpm = %d\n", pptable->EnableTdpm);
- pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
- pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
- pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
-
- pr_info("FanStopTemp = %d\n", pptable->FanStopTemp);
- pr_info("FanStartTemp = %d\n", pptable->FanStartTemp);
-
- pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
- pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
- pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid);
- pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
- pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
- pr_info("FanGainPlx = %d\n", pptable->FanGainPlx);
- pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
- pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
- pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
- pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
- pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
- pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
- pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
- pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
- pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
-
- pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
- pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
- pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
- pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
-
- pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
- pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
-
- pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxCksOn.a,
- pptable->dBtcGbGfxCksOn.b,
- pptable->dBtcGbGfxCksOn.c);
- pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxCksOff.a,
- pptable->dBtcGbGfxCksOff.b,
- pptable->dBtcGbGfxCksOff.c);
- pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxAfll.a,
- pptable->dBtcGbGfxAfll.b,
- pptable->dBtcGbGfxAfll.c);
- pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- pr_info("XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
- pr_info("XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
- pr_info("XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
- pr_info("XgmiUclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]);
- pr_info("XgmiSocclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]);
- pr_info("XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
-
- pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
- pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc);
-
- pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm);
- pr_info("padding16_Fan = %d\n", pptable->padding16_Fan);
-
- pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
- pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
-
- pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- for (i = 0; i < 11; i++)
- pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
-
- for (i = 0; i < 3; i++)
- pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]);
-
- pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
- pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
-
- pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
-
- pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
- pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V);
-
- pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset);
- pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- pr_info("SocOffset = 0x%x\n", pptable->SocOffset);
- pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- pr_info("AcDcGpio = %d\n", pptable->AcDcGpio);
- pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity);
- pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio);
- pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
-
- pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio);
- pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
- pr_info("Padding1 = 0x%x\n", pptable->Padding1);
- pr_info("Padding2 = 0x%x\n", pptable->Padding2);
-
- pr_info("LedPin0 = %d\n", pptable->LedPin0);
- pr_info("LedPin1 = %d\n", pptable->LedPin1);
- pr_info("LedPin2 = %d\n", pptable->LedPin2);
- pr_info("padding8_4 = 0x%x\n", pptable->padding8_4);
-
- pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
- pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
- pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
-
- pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
- pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
- pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
-
- pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
- pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
- pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
-
- pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
- pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
- pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
-
- for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
- pr_info("I2cControllers[%d]:\n", i);
- pr_info(" .Enabled = %d\n",
- pptable->I2cControllers[i].Enabled);
- pr_info(" .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- pr_info(" .ControllerPort = %d\n",
- pptable->I2cControllers[i].ControllerPort);
- pr_info(" .ControllerName = %d\n",
- pptable->I2cControllers[i].ControllerName);
- pr_info(" .ThermalThrottler = %d\n",
- pptable->I2cControllers[i].ThermalThrottler);
- pr_info(" .I2cProtocol = %d\n",
- pptable->I2cControllers[i].I2cProtocol);
- pr_info(" .I2cSpeed = %d\n",
- pptable->I2cControllers[i].I2cSpeed);
- }
-
- for (i = 0; i < 10; i++)
- pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]);
-
- for (i = 0; i < 8; i++)
- pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]);
-}
-#endif
-
static int check_powerplay_tables(
struct pp_hwmgr *hwmgr,
const ATOM_Vega20_POWERPLAYTABLE *powerplay_table)
@@ -652,8 +80,6 @@ static int check_powerplay_tables(
return -EINVAL;
}
- //dump_pptable(&powerplay_table->smcPPTable);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 9118fcddbf11..227bf0e84a13 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -60,7 +60,7 @@ struct vi_dpm_level {
struct vi_dpm_table {
uint32_t count;
- struct vi_dpm_level dpm_level[] __counted_by(count);
+ struct vi_dpm_level dpm_level[];
};
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {
struct phm_clock_array {
uint32_t count;
- uint32_t values[] __counted_by(count);
+ uint32_t values[];
};
struct phm_clock_voltage_dependency_record {
@@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record {
struct phm_clock_voltage_dependency_table {
uint32_t count;
- struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_clock_voltage_dependency_record entries[];
};
struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {
struct phm_uvd_clock_voltage_dependency_table {
uint8_t count;
- struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvd_clock_voltage_dependency_record entries[];
};
struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {
struct phm_acp_clock_voltage_dependency_table {
uint32_t count;
- struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acp_clock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_record {
@@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record {
struct phm_phase_shedding_limits_table {
uint32_t count;
- struct phm_phase_shedding_limits_record entries[] __counted_by(count);
+ struct phm_phase_shedding_limits_record entries[];
};
struct phm_vceclock_voltage_dependency_table {
uint8_t count;
- struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vceclock_voltage_dependency_record entries[];
};
struct phm_uvdclock_voltage_dependency_table {
uint8_t count;
- struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvdclock_voltage_dependency_record entries[];
};
struct phm_samuclock_voltage_dependency_table {
uint8_t count;
- struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samuclock_voltage_dependency_record entries[];
};
struct phm_acpclock_voltage_dependency_table {
uint32_t count;
- struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acpclock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_table {
uint8_t count;
- struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vce_clock_voltage_dependency_record entries[];
};
@@ -393,7 +393,7 @@ union phm_cac_leakage_record {
struct phm_cac_leakage_table {
uint32_t count;
- union phm_cac_leakage_record entries[] __counted_by(count);
+ union phm_cac_leakage_record entries[];
};
struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {
struct phm_samu_clock_voltage_dependency_table {
uint8_t count;
- struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samu_clock_voltage_dependency_record entries[];
};
struct phm_cac_tdp_table {
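
For context on the hunks above: __counted_by() ties a flexible array member to its count field so fortified kernel builds (FORTIFY_SOURCE, UBSAN bounds) can check accesses at run time; dropping the annotation loses that checking but changes no structure layout. A user-space sketch of the idea -- the fallback macro is hypothetical, for compilers without the attribute:

#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef counted_by
# define counted_by(m)	/* no-op on compilers without support */
#endif

struct clock_array {
	unsigned int count;
	unsigned int values[] counted_by(count);
};

static struct clock_array *clock_array_alloc(unsigned int n)
{
	struct clock_array *a;

	a = malloc(sizeof(*a) + n * sizeof(a->values[0]));
	if (a)
		a->count = n;	/* must be valid before values[] is used */
	return a;
}
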
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index bb3bc68dfc39..67d5a8123416 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -140,7 +140,8 @@ int smu_set_soft_freq_range(struct smu_context *smu,
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
- max);
+ max,
+ false);
return ret;
}
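
smu_set_soft_freq_range() now threads an extra boolean through set_soft_freq_limited_range(); judging from the header changes later in this diff, the parameter is named "automatic", and this explicit-range path passes false. A hedged sketch of the new calling convention (function name is hypothetical):

/* Sketch only; smu comes from the surrounding driver code. */
static int force_gfxclk_range(struct smu_context *smu, uint32_t min, uint32_t max)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->set_soft_freq_limited_range)
		return -EOPNOTSUPP;

	/* false: the caller supplies an explicit range, not an automatic one. */
	return smu->ppt_funcs->set_soft_freq_limited_range(smu, SMU_GFXCLK,
							   min, max, false);
}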
@@ -549,7 +550,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return false;
- if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
+ amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
return true;
return false;
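
Besides the MP1 firmware-version gate, is_support_sw_smu() now also requires the SMC IP block itself to be registered and valid, so a device whose SMC block has been disabled (for example by the partitioning rework elsewhere in this series) no longer claims SW SMU support. The combined predicate, as a sketch:

/* Equivalent condition after this hunk (ignoring the earlier early-outs): */
bool sw_smu = (adev->asic_type != CHIP_VEGA20) &&
	      (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
	      amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC);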
@@ -741,9 +743,9 @@ static int smu_set_funcs(struct amdgpu_device *adev)
return 0;
}
-static int smu_early_init(void *handle)
+static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu;
int r;
@@ -825,9 +827,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu)
return smu_set_config_table(smu, &adev->pm.config_table);
}
-static int smu_late_init(void *handle)
+static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
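
From here on, every SMU IP callback in this file migrates from the old opaque void *handle convention to receiving a struct amdgpu_ip_block *, matching the IP-structure rework called out in the merge description; the device is now reached via ip_block->adev instead of a cast. A minimal sketch of the converted callback shape, with a hypothetical name:

/* Hedged sketch of the new callback signature, not a full amd_ip_funcs table. */
static int example_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;	/* was: (struct amdgpu_device *)handle */

	dev_dbg(adev->dev, "early init for IP block %p\n", ip_block);
	return 0;
}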
@@ -1234,9 +1236,17 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
-static int smu_sw_init(void *handle)
+static bool smu_is_workload_profile_available(struct smu_context *smu,
+ u32 profile)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ return false;
+ return smu->workload_map && smu->workload_map[profile].valid_mapping;
+}
+
+static int smu_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
@@ -1264,7 +1274,12 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ else
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
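
smu_sw_init() now picks the boot-time workload mask conditionally: APUs, and parts whose workload map has no valid FULLSCREEN3D mapping, keep BOOTUP_DEFAULT, while everything else boots straight into the 3D profile. The new smu_is_workload_profile_available() helper guards the lookup against out-of-range profiles and a missing map. A userspace model of the selection logic, with hypothetical types:

#include <stdbool.h>
#include <stdint.h>

enum { PROFILE_BOOTUP_DEFAULT, PROFILE_FULLSCREEN3D, PROFILE_COUNT };

struct workload_entry { bool valid_mapping; };

static uint32_t pick_boot_mask(bool is_apu,
			       const struct workload_entry *map,
			       const uint32_t *priority)
{
	bool fs3d_ok = map && map[PROFILE_FULLSCREEN3D].valid_mapping;

	if (is_apu || !fs3d_ok)
		return 1u << priority[PROFILE_BOOTUP_DEFAULT];
	return 1u << priority[PROFILE_FULLSCREEN3D];
}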
@@ -1313,9 +1328,9 @@ static int smu_sw_init(void *handle)
return 0;
}
-static int smu_sw_fini(void *handle)
+static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
@@ -1786,10 +1801,10 @@ static int smu_start_smc_engine(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
@@ -2008,9 +2023,9 @@ static int smu_reset_mp1_state(struct smu_context *smu)
return ret;
}
-static int smu_hw_fini(void *handle)
+static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
@@ -2041,9 +2056,9 @@ static int smu_hw_fini(void *handle)
return 0;
}
-static void smu_late_fini(void *handle)
+static void smu_late_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
kfree(smu);
@@ -2052,26 +2067,31 @@ static void smu_late_fini(void *handle)
static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ip_block *ip_block;
int ret;
- ret = smu_hw_fini(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
+ if (!ip_block)
+ return -EINVAL;
+
+ ret = smu_hw_fini(ip_block);
if (ret)
return ret;
- ret = smu_hw_init(adev);
+ ret = smu_hw_init(ip_block);
if (ret)
return ret;
- ret = smu_late_init(adev);
+ ret = smu_late_init(ip_block);
if (ret)
return ret;
return 0;
}
-static int smu_suspend(void *handle)
+static int smu_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
uint64_t count;
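
Because hw_fini/hw_init/late_init now take an ip_block, smu_reset() first resolves its own SMC block with amdgpu_device_ip_get_ip_block() and bails out with -EINVAL if it is absent, instead of passing the bare adev around. A sketch of the lookup-then-call pattern, mirroring the hunk above:

/* Sketch; error handling mirrors smu_reset() above. */
static int reset_via_ip_block(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
	int ret;

	if (!ip_block)
		return -EINVAL;		/* SMC block not registered */

	ret = smu_hw_fini(ip_block);
	if (!ret)
		ret = smu_hw_init(ip_block);
	if (!ret)
		ret = smu_late_init(ip_block);
	return ret;
}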
@@ -2103,10 +2123,10 @@ static int smu_suspend(void *handle)
return 0;
}
-static int smu_resume(void *handle)
+static int smu_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

@@ -2226,7 +2246,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
- bool force_update)
+ bool init)
{
int ret = 0;
int index = 0;
@@ -2255,7 +2275,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (force_update || smu_dpm_ctx->dpm_level != level) {
+ if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2272,7 +2292,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
- if (force_update || smu->power_profile_mode != workload[0])
+ if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
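
The last two hunks rename force_update to init and narrow its effect: the performance level is now reapplied only when it actually changes, while the power profile is still bumped on first initialization or when the requested workload differs. The two conditions after this change, side by side as a sketch:

if (smu_dpm_ctx->dpm_level != level)			/* level: only on change */
	ret = smu_asic_set_performance_level(smu, level);

if (init || smu->power_profile_mode != workload[0])	/* profile: also on init */
	smu_bump_power_profile_mode(smu, workload, 0);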
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b44a185d07e8..8bb32b3f0d9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -859,11 +859,6 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
/**
- * @dump_pptable: Print the power play table to the system log.
- */
- void (*dump_pptable)(struct smu_context *smu);
-
- /**
* @get_power_limit: Get the device's power limits.
*/
int (*get_power_limit)(struct smu_context *smu,
@@ -1260,7 +1255,8 @@ struct pptable_funcs {
* @set_soft_freq_limited_range: Set the soft frequency range of a clock
* domain in MHz.
*/
- int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
+ bool automatic);
/**
* @set_power_source: Notify the SMU of the current power source.
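
Two API-surface changes land in pptable_funcs here: the debug-only dump_pptable hook is deleted outright (its large per-ASIC implementations are removed later in this diff), and set_soft_freq_limited_range gains the automatic flag discussed above. Any out-of-tree or stale implementation would fail to build; a hedged sketch of a vtable initializer after the change (all example_* names are hypothetical):

static const struct pptable_funcs example_ppt_funcs = {
	/* .dump_pptable = example_dump_pptable,   hook no longer exists */
	.set_soft_freq_limited_range = example_set_soft_freq_limited_range,
	/* now: int (*)(struct smu_context *, enum smu_clk_type,
	 *              uint32_t min, uint32_t max, bool automatic) */
};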
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
index ee457a6f0813..c2fd0a4a13e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x18
+#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;
// Debug Overrides Bitmask
-#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
+#define EPCS_HIGH_POWER 600
+#define EPCS_NORMAL_POWER 450
+#define EPCS_LOW_POWER 300
+#define EPCS_SHORTED_POWER 150
+#define EPCS_NO_BOOTUP 0
+
+typedef enum {
+ EPCS_SHORTED_LIMIT,
+ EPCS_LOW_POWER_LIMIT,
+ EPCS_NORMAL_POWER_LIMIT,
+ EPCS_HIGH_POWER_LIMIT,
+ EPCS_NOT_CONFIGURED,
+ EPCS_STATUS_COUNT,
+} EPCS_STATUS_e;
+
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
- uint16_t UclkFmin; // MHz
- uint16_t UclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding1;
+ uint16_t UclkFmin;
+ uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
- uint8_t Padding1[3];
+ uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
- uint16_t Padding2;
+ uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- uint32_t Spare[10];
+ uint16_t GfxclkFmaxVmax;
+ uint8_t GfxclkFmaxVmaxTemperature;
+ uint8_t Padding4[1];
+
+ uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;
//gfxclk
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
- uint8_t Padding[2];
+ uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- int16_t Padding1;
+ int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
-
- uint32_t Reserved[4];
+ uint16_t MaxReportedClock;
+ uint16_t Padding;
+ uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
- uint16_t GfxclkFreqCap;
+ uint16_t GfxDpmPadding;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
- uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
- uint16_t PaddingDcs;
+ uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
- uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
- uint16_t PaddingFclk;
+ uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
- uint8_t GfxAvfsPadding[3];
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
- uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
- uint32_t V2F_vmin_range_low;
- uint32_t V2F_vmin_range_high;
- uint32_t V2F_vmax_range_low;
- uint32_t V2F_vmax_range_high;
+ uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
- uint32_t Leakage_C0; // in IEEE float
- uint32_t Leakage_C1; // in IEEE float
- uint32_t Leakage_C2; // in IEEE float
- uint32_t Leakage_C3; // in IEEE float
- uint32_t Leakage_C4; // in IEEE float
- uint32_t Leakage_C5; // in IEEE float
- uint32_t GFX_CLK_SCALAR; // in IEEE float
- uint32_t GFX_CLK_INTERCEPT; // in IEEE float
- uint32_t GFX_CAC_M; // in IEEE float
- uint32_t GFX_CAC_B; // in IEEE float
- uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
- uint32_t DynToTotalCacScalar; // in IEEE
+ uint32_t CacEdcCacLeakageC0;
+ uint32_t CacEdcCacLeakageC1;
+ uint32_t CacEdcCacLeakageC2;
+ uint32_t CacEdcCacLeakageC3;
+ uint32_t CacEdcCacLeakageC4;
+ uint32_t CacEdcCacLeakageC5;
+ uint32_t CacEdcGfxClkScalar;
+ uint32_t CacEdcGfxClkIntercept;
+ uint32_t CacEdcCac_m;
+ uint32_t CacEdcCac_b;
+ uint32_t CacEdcCurrLimitGuardband;
+ uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
- uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
- uint16_t FuzzyFan_Reserved;
+ uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
- uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];
+ uint32_t ODFeatureCtrlMask;
+
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t PCIeBusy ;
+ uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
@@ -1665,12 +1688,12 @@ typedef struct {
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
- uint16_t Vcn0ActivityPercentage ;
+ uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
- uint16_t MovingAverageTotalBoardPower;
+ uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
- uint8_t padding1[3];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
- uint16_t MovingAvgApuSocketPower;
+ uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
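
PPTABLE_VERSION moves 0x18 -> 0x1B to match the reshuffled SkuTable/OverDrive layout above (dropped fields become padding, PSM DIDT PWL data is carved out of spare fuse space, the CAC/EDC coefficients are renamed), and the metrics grow Vmax-throttling and averaged PCIe/board-power counters. The new TABLE_*_FAILED defines extend the one-byte transfer status into a bitmask of per-field validation failures. A hedged userspace decoder sketch; how firmware packs the low byte alongside these bits is an assumption here:

#include <stdint.h>
#include <stdio.h>

/* Values taken from the header above. */
#define TABLE_TRANSFER_FAILED	0xFF
#define TABLE_PPT_FAILED	0x100
#define TABLE_TDC_FAILED	0x200
#define TABLE_TEMP_FAILED	0x400

static void decode_table_status(uint32_t status)
{
	if ((status & 0xFF) == TABLE_TRANSFER_FAILED)	/* assumed encoding */
		printf("table transfer failed outright\n");
	if (status & TABLE_PPT_FAILED)
		printf("PPT limit rejected\n");
	if (status & TABLE_TDC_FAILED)
		printf("TDC limit rejected\n");
	if (status & TABLE_TEMP_FAILED)
		printf("temperature limit rejected\n");
}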
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 822c6425d90e..0f96b8c59a0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xD
+#define SMU_METRICS_TABLE_VERSION 0xE
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -231,6 +231,9 @@ typedef struct __attribute__((packed, aligned(4))) {
// PER XCD ACTIVITY
uint32_t GfxBusy[8];
uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
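
SMU_METRICS_TABLE_VERSION bumps 0xD -> 0xE for the new PCIeOtherEndRecoveryAcc field, an accumulating count of link recoveries initiated by the other end of the PCIe link; consumers of such accumulators typically diff successive snapshots. A hedged sketch:

#include <stdint.h>

/* Derive a per-interval count from the accumulated counter (sketch). */
struct pcie_recovery_state { uint32_t last_acc; };

static uint32_t pcie_recovery_delta(struct pcie_recovery_state *s, uint32_t acc)
{
	uint32_t delta = acc - s->last_acc;	/* wraps correctly in uint32_t */

	s->last_acc = acc;
	return delta;
}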
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index c2ab336bb530..ed8304d82831 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index 1ad2dff71090..0886d8cffbd0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
@@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu);
int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e58220a7ee2f..044d6893b43e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -219,7 +219,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 46b456590a08..07c220102c1d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -186,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index c0f6b59369b7..5ad09323a29d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1559,437 +1559,6 @@ static int arcturus_set_performance_level(struct smu_context *smu,
return smu_v11_0_set_performance_level(smu, level);
}
-static void arcturus_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- int i;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
-
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]);
- }
-
- dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
- dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
- dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
- dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
-
- dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit);
- dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit);
- dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit);
- dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
- dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
- dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit);
- dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit);
-
- dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
- dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
-
- dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding);
-
- dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
- dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]);
- dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]);
- dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK].padding,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
- dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]);
- dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]);
- dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]);
- dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]);
- dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456);
-
- dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm);
- dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
- dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
- dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
-
- dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge);
- dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot);
- dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
- dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
- dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem);
- dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm);
-
- dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
- dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
- dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
-
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxAfll.a,
- pptable->dBtcGbGfxAfll.b,
- pptable->dBtcGbGfxAfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
-
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv);
-
- dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1);
- dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2);
-
- dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow);
- dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
- dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
-
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping);
- dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping);
-
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent);
- dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem);
-
- dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent);
- dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput);
-
- dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
- dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
- dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = %d\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = %d\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = %d\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = %d\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = %d\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .Speed = %d\n",
- pptable->I2cControllers[i].Speed);
- }
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth);
-
- dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
-
-}
-
static bool arcturus_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -2365,8 +1934,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
.set_performance_level = arcturus_set_performance_level,
- /* debug (internal used) */
- .dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
.dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
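
The roughly 430-line arcturus_dump_pptable() above (and, below, its beige_goby counterpart) is deleted along with the vtable hook, so the table no longer lands in dmesg. The raw table is still reachable from userspace, for example through the pp_table sysfs file, and can be parsed offline; a hedged sketch (the card index is an assumption):

#include <stdio.h>

/* Dump the raw pptable blob to stdout for offline parsing. */
int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_table";
	FILE *f = fopen(path, "rb");
	unsigned char buf[4096];
	size_t n;

	if (!f) {
		perror(path);
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}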
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 16af1a329621..9fa305ba6422 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1689,7 +1689,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return 0;
- ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return 0;
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9c3c48297cba..77e58eb46328 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1469,7 +1469,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
if (ret)
goto forec_level_out;
- ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto forec_level_out;
break;
@@ -2493,1274 +2493,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
return val != 0x0;
}
-static void beige_goby_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_beige_goby_t *pptable = table_context->driver_pptable;
- int i;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]);
- }
-
- for (i = 0; i < TDC_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]);
- dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]);
- }
-
- for (i = 0; i < TEMP_COUNT; i++) {
- dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]);
- }
-
- dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit);
- dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]);
- dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]);
- dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]);
-
- dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit);
- for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) {
- dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]);
- dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]);
- }
- dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc);
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc);
-
- dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin);
-
- dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "DcModeMaxFreq\n");
- dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]);
-
- dev_info(smu->adev->dev, "FreqTableUclkDiv\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]);
-
- dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq);
- dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "MemVddciVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]);
-
- dev_info(smu->adev->dev, "MemMvddVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry);
- dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit);
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding);
-
- dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask);
-
- dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]);
- dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow);
- dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]);
- dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt);
- dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt);
- dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt);
-
- dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage);
- dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime);
- dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime);
- dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum);
- dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis);
- dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout);
-
- dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]);
- dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]);
- dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]);
- dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]);
- dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]);
-
- dev_info(smu->adev->dev, "FlopsPerByteTable\n");
- for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]);
-
- dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv);
- dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]);
- dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]);
- dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]);
-
- dev_info(smu->adev->dev, "UclkDpmPstates\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]);
-
- dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq);
- dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding);
-
- dev_info(smu->adev->dev, "PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]);
-
- dev_info(smu->adev->dev, "PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]);
-
- dev_info(smu->adev->dev, "LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]);
-
- dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGain\n");
- for (i = 0; i < TEMP_COUNT; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]);
-
- dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16);
- dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect);
- dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect);
- dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs);
-
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxDfll.a,
- pptable->dBtcGbGfxDfll.b,
- pptable->dBtcGbGfxDfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n");
- for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) {
- dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]);
- dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]);
- }
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]);
- dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]);
- dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]);
- dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]);
- dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]);
- dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]);
- dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
- dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
-
- dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
- dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
- dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]);
- dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]);
- dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]);
- dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = 0x%x\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .Speed = 0x%x\n",
- pptable->I2cControllers[i].Speed);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = 0x%x\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n",
- pptable->I2cControllers[i].PaddingConfig);
- }
-
- dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl);
- dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda);
- dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr);
- dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio);
-
- dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio);
- dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity);
- dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity);
- dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio);
- dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity);
- dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0);
- dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1);
- dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2);
- dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask);
- dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie);
- dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError);
- dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]);
- dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding);
- dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth);
- dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]);
- dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]);
- dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]);
-
- dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower);
- dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]);
-
- dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled);
- dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled);
- dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]);
- dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]);
-
- dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]);
- dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]);
- dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]);
- dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]);
- dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]);
- dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]);
- dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]);
- dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]);
- dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]);
- dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]);
- dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]);
-
- dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]);
- dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]);
- dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]);
- dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]);
- dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]);
- dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]);
- dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]);
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
-}
-
-static void sienna_cichlid_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- int i;
-
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
- IP_VERSION(11, 0, 13)) {
- beige_goby_dump_pptable(smu);
- return;
- }
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]);
- }
-
- for (i = 0; i < TDC_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]);
- dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]);
- }
-
- for (i = 0; i < TEMP_COUNT; i++) {
- dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]);
- }
-
- dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit);
- dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]);
- dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]);
- dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]);
-
- dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit);
- for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) {
- dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]);
- dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]);
- }
- dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc);
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc);
-
- dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin);
- dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin);
-
- dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "DcModeMaxFreq\n");
- dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]);
-
- dev_info(smu->adev->dev, "FreqTableUclkDiv\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]);
-
- dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq);
- dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "MemVddciVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]);
-
- dev_info(smu->adev->dev, "MemMvddVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry);
- dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit);
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding);
-
- dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask);
-
- dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]);
- dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow);
- dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]);
- dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt);
- dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt);
- dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt);
-
- dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage);
- dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime);
- dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime);
- dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum);
- dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis);
- dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout);
-
- dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]);
- dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]);
- dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]);
- dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]);
- dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]);
-
- dev_info(smu->adev->dev, "FlopsPerByteTable\n");
- for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]);
-
- dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv);
- dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]);
- dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]);
- dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]);
-
- dev_info(smu->adev->dev, "UclkDpmPstates\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]);
-
- dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq);
- dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding);
-
- dev_info(smu->adev->dev, "PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]);
-
- dev_info(smu->adev->dev, "PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]);
-
- dev_info(smu->adev->dev, "LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]);
-
- dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGain\n");
- for (i = 0; i < TEMP_COUNT; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]);
-
- dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16);
- dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect);
- dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect);
- dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs);
-
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxDfll.a,
- pptable->dBtcGbGfxDfll.b,
- pptable->dBtcGbGfxDfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n");
- for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) {
- dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]);
- dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]);
- }
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]);
- dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]);
- dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]);
- dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]);
- dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]);
- dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]);
- dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
- dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
-
- dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
- dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
- dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]);
- dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]);
- dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]);
- dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = 0x%x\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .Speed = 0x%x\n",
- pptable->I2cControllers[i].Speed);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = 0x%x\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n",
- pptable->I2cControllers[i].PaddingConfig);
- }
-
- dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl);
- dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda);
- dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr);
- dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio);
-
- dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio);
- dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity);
- dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity);
- dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio);
- dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity);
- dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0);
- dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1);
- dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2);
- dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask);
- dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie);
- dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError);
- dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]);
- dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding);
- dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth);
- dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]);
- dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]);
- dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]);
-
- dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower);
- dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]);
-
- dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled);
- dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled);
- dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]);
- dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]);
-
- dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]);
- dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]);
- dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]);
- dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]);
- dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]);
- dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]);
- dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]);
- dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]);
- dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]);
- dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]);
- dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]);
-
- dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]);
- dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]);
- dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]);
- dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]);
- dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]);
- dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]);
- dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]);
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
-}
-
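The hunks above delete the tail of sienna_cichlid_dump_pptable(), hundreds of field-by-field dev_info() prints of the PowerPlay table; the matching .dump_pptable hook is dropped from the ops table a few lines below, and the same removal repeats for the other SMU generations later in this series. If such a dump were ever needed again, the kernel's print_hex_dump() helper could produce it in a couple of lines. A hedged sketch, illustrative only and not part of the patch:

/* Illustrative only, not part of this patch: dump the whole table in one
 * call instead of one dev_info() per field. 'pptable' is assumed to be the
 * driver_pptable pointer from smu->smu_table. */
print_hex_dump(KERN_INFO, "pptable: ", DUMP_PREFIX_OFFSET,
	       16, 4, pptable, sizeof(*pptable), false);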
static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
@@ -4397,7 +3129,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch,
.get_power_limit = sienna_cichlid_get_power_limit,
.update_pcie_parameters = sienna_cichlid_update_pcie_parameters,
- .dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.fini_microcode = smu_v11_0_fini_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 16fcd9dcd202..480cf3cb204d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1616,7 +1616,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
break;
default:
if (!ras || !adev->ras_enabled ||
- adev->gmc.xgmi.pending_reset) {
+ (adev->init_lvl->level ==
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) {
if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
@@ -1763,7 +1764,8 @@ failed:
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1778,7 +1780,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1786,7 +1791,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
@@ -1854,6 +1862,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
uint32_t mclk_min = 0, mclk_max = 0;
uint32_t socclk_min = 0, socclk_max = 0;
int ret = 0;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1873,6 +1882,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
mclk_max = mem_table->max;
socclk_min = soc_table->min;
socclk_max = soc_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1905,13 +1915,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
+ auto_level = false;
}
if (sclk_min && sclk_max) {
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1920,7 +1932,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1929,7 +1942,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
}
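The new automatic flag only changes the low 16 bits of the SMC message parameter: the clock id stays in the high half, and automatic mode substitutes the widest possible range (min 0, max 0xffff) for the caller-supplied MHz value. A standalone userspace sketch of the packing; the clk_id value below is made up:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the parameter packing in the hunks above, not the kernel code. */
static uint32_t pack_soft_freq(uint32_t clk_id, uint32_t mhz,
			       int automatic, int is_max)
{
	if (automatic)
		return (clk_id << 16) | (is_max ? 0xffffu : 0u);
	return (clk_id << 16) | (mhz & 0xffffu);
}

int main(void)
{
	printf("manual max 1800 MHz: 0x%08x\n",
	       (unsigned)pack_soft_freq(2, 1800, 0, 1)); /* 0x00020708 */
	printf("automatic max:       0x%08x\n",
	       (unsigned)pack_soft_freq(2, 0, 1, 1));    /* 0x0002ffff */
	return 0;
}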
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 22737b11b1bf..a333ab827f48 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1091,9 +1091,10 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
}
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
int ret = 0;
@@ -1299,7 +1300,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1335,7 +1336,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
@@ -1354,7 +1355,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false);
if (ret)
return ret;
@@ -1362,7 +1363,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false);
if (ret)
return ret;
@@ -1370,7 +1371,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false);
if (ret)
return ret;
@@ -1378,7 +1379,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index cc0504b063fa..0b210b1f2628 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -707,7 +707,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -740,7 +740,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -911,7 +911,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -919,7 +919,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -961,13 +961,13 @@ static int renior_set_dpm_profile_freq(struct smu_context *smu,
}
if (sclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false);
if (socclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false);
if (fclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index ed15f5a0fd11..3d3cd546f0ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu)
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool automatic)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 2c35eb31475a..f6b029354327 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1297,9 +1297,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
}
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1328,7 +1329,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
return 0;
ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
- min, max);
+ min, max, false);
if (!ret) {
pstate_table->gfxclk_pstate.curr.min = min;
pstate_table->gfxclk_pstate.curr.max = max;
@@ -1348,7 +1349,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
/* Restore default min/max clocks and enable determinism */
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -1422,7 +1423,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1441,7 +1442,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = pstate_table->gfxclk_pstate.custom.min;
max_clk = pstate_table->gfxclk_pstate.custom.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
}
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e17466cc1952..6cfd66363915 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1608,7 +1608,8 @@ failed:
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1623,7 +1624,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1631,7 +1635,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
@@ -1708,6 +1715,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
uint32_t dclk_min = 0, dclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
int ret = 0, i;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1739,6 +1747,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
dclk_max = dclk_table->max;
fclk_min = fclk_table->min;
fclk_max = fclk_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1780,13 +1789,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
vclk_min = vclk_max = 0;
dclk_min = dclk_max = 0;
fclk_min = fclk_max = 0;
+ auto_level = false;
}
if (sclk_min && sclk_max) {
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1798,7 +1809,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1810,7 +1822,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1825,7 +1838,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
i ? SMU_VCLK1 : SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1840,7 +1854,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
i ? SMU_DCLK1 : SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1852,7 +1867,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ auto_level);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1d024b122b0c..3e2277abc754 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -736,19 +736,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- SkuTable_t *skutable = &pptable->SkuTable;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
-}
-
static int smu_v13_0_0_system_features_control(struct smu_context *smu,
bool en)
{
@@ -1975,7 +1962,8 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -2555,18 +2543,16 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
- }
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
}
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -3026,7 +3012,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.i2c_init = smu_v13_0_0_i2c_control_init,
.i2c_fini = smu_v13_0_0_i2c_control_fini,
.is_dpm_running = smu_v13_0_0_is_dpm_running,
- .dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 9c2c43bfed0b..a71b7c0803f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -811,9 +811,10 @@ failed:
}
static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
enum smu_message_type msg_set_min, msg_set_max;
uint32_t min_clk = min;
@@ -950,7 +951,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu,
if (ret)
goto force_level_out;
- ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto force_level_out;
break;
@@ -1046,9 +1047,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
if (sclk_min && sclk_max) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
- SMU_SCLK,
- sclk_min,
- sclk_max);
+ SMU_SCLK,
+ sclk_min,
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1060,7 +1062,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1069,7 +1072,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 55ed6247eb61..7ebb675c5786 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -102,6 +102,24 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
+static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
+{
+ return (smu->adev->flags & AMD_IS_APU) &&
+ smu->smc_fw_version <= 0x4556900;
+}
+
+static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ return smu->smc_fw_version >= 0x557600;
+ case IP_VERSION(13, 0, 14):
+ return smu->smc_fw_version >= 0x05550E00;
+ default:
+ return false;
+ }
+}
+
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -253,7 +271,7 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
-#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+#define GET_METRIC_FIELD(field, flag) ((flag) ?\
(metrics_a->field) : (metrics_x->field))
struct smu_v13_0_6_dpm_map {
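The metrics fields are Q10 fixed point: SMUQ10_TO_UINT drops the 10 fractional bits, SMUQ10_FRAC keeps them, and SMUQ10_ROUND rounds to nearest by adding 1 when the fraction is at least 0x200 (one half). The reworked GET_METRIC_FIELD takes the unified-metrics decision as an explicit flag (from the new smu_v13_0_6_is_unified_metrics() helper) instead of re-deriving it from adev->flags at every use. A small standalone check of the rounding macros, copied from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x)    ((x) & 0x3ff)
#define SMUQ10_ROUND(x)   ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

int main(void)
{
	uint32_t q = 2687; /* 2.624 in Q10: 2*1024 + 639 */
	printf("int=%u frac=%u rounded=%u\n",
	       (unsigned)SMUQ10_TO_UINT(q), (unsigned)SMUQ10_FRAC(q),
	       (unsigned)SMUQ10_ROUND(q)); /* 2 639 3 */
	return 0;
}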
@@ -352,7 +370,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_6);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -583,7 +601,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- struct amdgpu_device *adev = smu->adev;
+ bool flag = smu_v13_0_6_is_unified_metrics(smu);
int ret, i, retry = 100;
uint32_t table_version;
@@ -595,7 +613,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (GET_METRIC_FIELD(AccumulationCounter))
+ if (GET_METRIC_FIELD(AccumulationCounter, flag))
break;
usleep_range(1000, 1100);
@@ -612,29 +630,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0];
pptable->Init = true;
}
@@ -957,6 +975,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
+ bool flag = smu_v13_0_6_is_unified_metrics(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -971,50 +990,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_GFXCLK:
if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -1739,7 +1758,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
/* Min UCLK is not expected to be changed */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, uclk_table->max);
+ smu, SMU_UCLK, 0, uclk_table->max, false);
if (ret)
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
@@ -1758,7 +1777,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max,
+ bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1806,7 +1826,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, max);
+ smu, SMU_UCLK, 0, max, false);
if (!ret)
pstate_table->uclk_pstate.curr.max = max;
}
@@ -1946,7 +1966,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_GFXCLK, min_clk, max_clk);
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
@@ -1954,7 +1974,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = dpm_context->dpm_tables.uclk_table.min;
max_clk = dpm_context->dpm_tables.uclk_table.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_UCLK, min_clk, max_clk);
+ smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
pstate_table->uclk_pstate.custom.max = 0;
@@ -1978,7 +1998,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
max_clk = pstate_table->gfxclk_pstate.custom.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_GFXCLK, min_clk, max_clk);
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
@@ -1989,7 +2009,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = pstate_table->uclk_pstate.curr.min;
max_clk = pstate_table->uclk_pstate.custom.max;
return smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_UCLK, min_clk, max_clk);
+ smu, SMU_UCLK, min_clk, max_clk, false);
}
break;
default:
@@ -2299,14 +2319,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
+ bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_5 *gpu_metrics =
- (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_6 *gpu_metrics =
+ (struct gpu_metrics_v1_6 *)smu_table->gpu_metrics_table;
+ bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
MetricsTableX_t *metrics_x;
MetricsTableA_t *metrics_a;
+ struct amdgpu_xcp *xcp;
u16 link_width_level;
+ u32 inst_mask;
metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2317,53 +2341,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_a = (MetricsTableA_t *)metrics_x;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 6);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag);
- /* Throttle status is not reported through metrics now */
- gpu_metrics->throttle_status = 0;
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag);
+ gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag);
+ gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag);
+ gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag);
+ gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag);
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
/* Check smu version, PCIE link speed and width will be reported from pmfw metric
@@ -2399,41 +2430,77 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_x->PCIeNAKSentCountAcc;
gpu_metrics->pcie_nak_rcvd_count_acc =
metrics_x->PCIeNAKReceivedCountAcc;
+ if (smu_v13_0_6_is_other_end_count_available(smu))
+ gpu_metrics->pcie_lc_perf_other_end_recovery =
+ metrics_x->PCIeOtherEndRecoveryAcc;
+
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
- }
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]);
+ }
+
+ gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
+
+ apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
+ smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 6)) &&
+ (smu->smc_fw_version >= 0x556F00);
+ smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 14)) &&
+ (smu->smc_fw_version >= 0x05550B00);
+
+ per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->xcp_stats[i].jpeg_busy
+ [(idx * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ gpu_metrics->xcp_stats[i].vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]);
+ idx++;
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- inst = GET_INST(JPEG, i);
- for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
- [(inst * adev->jpeg.num_jpeg_rings) + j]);
}
- }
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- inst = GET_INST(VCN, i);
- gpu_metrics->vcn_activity[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
+ gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+ idx++;
+ }
+ }
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag));
- gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag);
*table = (void *)gpu_metrics;
kfree(metrics_x);
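The reworked loop above aggregates busy values per partition rather than globally: for each XCP it walks the VCN instance mask, storing VCN activity at vcn_busy[idx] and JPEG activity at idx * num_jpeg_rings + j inside that partition's xcp_stats slot, while the source metrics array stays indexed by the global instance id; per-instance GFX stats are additionally gated by the per_inst firmware checks. A standalone sketch of the index mapping, with made-up sizes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical layout: this XCP owns global VCN/JPEG instances 2 and 3,
	 * with 4 JPEG rings per instance. */
	int num_jpeg_rings = 4;
	int insts_in_xcp[] = { 2, 3 };

	for (int idx = 0; idx < 2; idx++) {
		int inst = insts_in_xcp[idx];
		for (int j = 0; j < num_jpeg_rings; j++)
			printf("xcp_stats.jpeg_busy[%d] <- JpegBusy[%d]\n",
			       idx * num_jpeg_rings + j,
			       inst * num_jpeg_rings + j);
	}
	return 0;
}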
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b891a5e0a396..23f13388455f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -734,19 +734,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v13_0_7_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- SkuTable_t *skutable = &pptable->SkuTable;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
-}
-
static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics)
{
uint32_t throttler_status = 0;
@@ -1964,7 +1951,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -2605,7 +2593,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
.is_dpm_running = smu_v13_0_7_is_dpm_running,
- .dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 260c339f89c5..71d58c8c8cc0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -945,9 +945,10 @@ failed:
}
static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
enum smu_message_type msg_set_min, msg_set_max;
uint32_t min_clk = min;
@@ -1134,7 +1135,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu,
if (ret)
goto force_level_out;
- ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto force_level_out;
break;
@@ -1254,9 +1255,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
if (sclk_min && sclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_SCLK,
- sclk_min,
- sclk_max);
+ SMU_SCLK,
+ sclk_min,
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1266,18 +1268,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
if (fclk_min && fclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_FCLK,
- fclk_min,
- fclk_max);
+ SMU_FCLK,
+ fclk_min,
+ fclk_max,
+ false);
if (ret)
return ret;
}
if (socclk_min && socclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_SOCCLK,
- socclk_min,
- socclk_max);
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1286,7 +1290,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
ret = yellow_carp_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1295,7 +1300,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
ret = yellow_carp_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 865e916fc425..f7745eaf118e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1102,7 +1102,8 @@ failed:
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1117,7 +1118,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1125,7 +1129,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
@@ -1202,6 +1209,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
uint32_t dclk_min = 0, dclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
int ret = 0, i;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1233,6 +1241,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
dclk_max = dclk_table->max;
fclk_min = fclk_table->min;
fclk_max = fclk_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1268,7 +1277,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1280,7 +1290,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1292,7 +1303,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1307,7 +1319,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
i ? SMU_VCLK1 : SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1322,7 +1335,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
i ? SMU_DCLK1 : SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1334,7 +1348,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ auto_level);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5899d01fa73d..cefe10b95d8e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -732,19 +732,6 @@ static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
-}
-
static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
{
uint32_t throttler_status = 0;
@@ -1077,12 +1064,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
- od_min_setting = overdrive_lowerlimits->GfxclkFmin;
- od_max_setting = overdrive_upperlimits->GfxclkFmin;
- break;
case PP_OD_FEATURE_GFXCLK_FMAX:
- od_min_setting = overdrive_lowerlimits->GfxclkFmax;
- od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
+ od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1269,10 +1253,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
- size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
- od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+ overdrive_lowerlimits->GfxclkFoffset,
+ overdrive_upperlimits->GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1414,7 +1404,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}
@@ -1516,7 +1506,8 @@ static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -1796,7 +1787,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
-
+ uint32_t current_profile_mode = smu->power_profile_mode;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
@@ -1854,6 +1845,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
}
}
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, false);
+ else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, true);
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -2158,7 +2154,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2213,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2304,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
- user_od_table->OverDriveTable.GfxclkFmin =
- user_od_table_bak.OverDriveTable.GfxclkFmin;
- user_od_table->OverDriveTable.GfxclkFmax =
- user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.GfxclkFoffset =
+ user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2434,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
switch (input[i]) {
- case 0:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFmin = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
-
case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2446,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
@@ -2480,13 +2457,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}
- if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
- dev_err(adev->dev,
- "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
- (uint32_t)od_table->OverDriveTable.GfxclkFmin,
- (uint32_t)od_table->OverDriveTable.GfxclkFmax);
- return -EINVAL;
- }
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2806,7 +2776,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.i2c_init = smu_v14_0_2_i2c_control_init,
.i2c_fini = smu_v14_0_2_i2c_control_fini,
.is_dpm_running = smu_v14_0_2_is_dpm_running,
- .dump_pptable = smu_v14_0_2_dump_pptable,
.init_microcode = smu_v14_0_init_microcode,
.load_microcode = smu_v14_0_load_microcode,
.fini_microcode = smu_v14_0_fini_microcode,
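SMU 14.0.2 overdrive moves from an absolute GfxclkFmin/GfxclkFmax pair to a single signed GfxclkFoffset applied on top of the stock clock curve, which is why the Fmin edit case and the Fmin-greater-than-Fmax sanity check disappear and the sysfs output becomes OD_SCLK_OFFSET. A hedged sketch of validating such an offset against board limits; the function name and bounds are illustrative, not the driver's:

#include <stdio.h>

/* Illustrative only: range-check a requested offset against bounds in the
 * style of OverDriveLimitsBasicMin/Max from the hunks above. */
static int gfx_offset_valid(int requested, int lo, int hi)
{
	return requested >= lo && requested <= hi;
}

int main(void)
{
	int lo = -500, hi = 350; /* hypothetical MHz bounds */
	printf("%d ok? %d\n", 200, gfx_offset_valid(200, lo, hi));   /* 1 */
	printf("%d ok? %d\n", -800, gfx_offset_valid(-800, lo, hi)); /* 0 */
	return 0;
}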
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 91ad434bcdae..843f00c9e407 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1078,6 +1078,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 5):
structure_size = sizeof(struct gpu_metrics_v1_5);
break;
+ case METRICS_VERSION(1, 6):
+ structure_size = sizeof(struct gpu_metrics_v1_6);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 6f4d212607d7..c09ecf1a68a0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -78,7 +78,6 @@
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
#define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max)
#define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level)
-#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu)
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu)
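The deleted line removes the last wrapper for the dropped dump_pptable callback. For context, the smu_ppt_funcs dispatch macro used throughout these lines plausibly expands to a NULL-checked indirect call with a per-interface default return; a hedged reconstruction, since the real definition sits earlier in smu_internal.h and is not part of this diff:

/* Hedged reconstruction, not quoted from the file: call the ops-table hook
 * if it exists, otherwise fall back to the supplied default return value. */
#define smu_ppt_funcs(intf, ret, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
		(smu)->ppt_funcs->intf(smu, ##args) : (ret)) : -EINVAL)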
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fca8b08535a5..6328627b7c34 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
- int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
- radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev;
if (ASIC_IS_DCE5(rdev)) {
if (radeon_auxch)
@@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
}
- ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
- if (!ret)
- radeon_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
+ drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 1b2d31c4d77c..ac77d1246b94 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = radeon_get_ib_value(p, idx+1) << 8;
+ offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
offset, track->vgt_strmout_bo_offset[idx_value]);
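The (u64) cast is the whole fix: radeon_get_ib_value() yields a 32-bit value, so without the cast the left shift by 8 happens in 32-bit arithmetic and the top byte is silently lost before the result is widened for the comparison. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ib = 0x01234567;
	uint64_t bad  = ib << 8;           /* shifted as 32-bit: 0x23456700 */
	uint64_t good = (uint64_t)ib << 8; /* widened first:   0x123456700 */
	printf("bad=0x%llx good=0x%llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}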
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 528a8f3677c2..f9c73c55f04f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1786,6 +1786,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector
return MODE_OK;
}
+static int
+radeon_connector_late_register(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int r = 0;
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+ r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
@@ -1800,6 +1814,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_edp_connector_funcs = {
@@ -1810,6 +1825,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
@@ -1820,6 +1836,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
void
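Together with the atombios_dp.c hunk above, this converts DP AUX bring-up to the standard two-phase DRM pattern: drm_dp_aux_init() at connector creation, when connector->kdev does not exist yet, and drm_dp_aux_register() from .late_register, once the sysfs device is in place. A compressed, hedged restatement of the two halves; kernel context (struct radeon_connector, the DRM DP helpers) is assumed:

/* 1) At connector init: prepare the AUX channel only, no sysfs yet. */
drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
radeon_connector->ddc_bus->has_aux = true;

/* 2) In .late_register, once connector->kdev exists: */
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
return drm_dp_aux_register(&radeon_connector->ddc_bus->aux);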
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 0f723292409e..fafed331e0a0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
- uint32_t index_mask = 0;
+ uint32_t index_mask = drm_encoder_mask(encoder);
int count;
/* DIG routing gets problematic */
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9735f4968b86..bf2d4b16dc2a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-const struct drm_gem_object_funcs radeon_gem_object_funcs;
-
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -132,7 +130,6 @@ retry:
return r;
}
*obj = &robj->tbo.base;
- (*obj)->funcs = &radeon_gem_object_funcs;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d0e4b43d155c..7672404fdb29 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &radeon_gem_object_funcs;
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 717307d6b5b7..fa9f9846b88e 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -609,6 +609,7 @@ struct kfd_ioctl_smi_events_args {
* migrate_update: GPU page fault is recovered by 'M' for migrate, 'U' for update
* rw: 'W' for write page fault, 'R' for read page fault
* rescheduled: 'R' if the queue restore failed and rescheduled to try again
+ * error_code: migrate failure error code, 0 if no error
*/
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
"%x %s\n", (reset_seq_num), (reset_cause)
@@ -630,9 +631,9 @@ struct kfd_ioctl_smi_events_args {
"%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
(from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
-#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
- "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
- (from), (to), (migrate_trigger)
+#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger, error_code) \
+ "%lld -%d @%lx(%lx) %x->%x %d %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (migrate_trigger), (error_code)
#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
"%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)