From c3bce35c662b8dfd91e52efaf463df41023b0852 Mon Sep 17 00:00:00 2001 From: "Frank.Min" Date: Tue, 2 Oct 2018 15:02:09 +0800 Subject: drm/amdgpu: fix sdma doorbell comments typo Reviewed-by: Alex Deucher Signed-off-by: Frank.Min Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d0102cfc8efb..fb922a872a80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -432,7 +432,7 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT * default non-graphics QWORD index is 0xe0 - 0xFF inclusive */ - /* sDMA engines reserved from 0xe0 -oxef */ + /* sDMA engines reserved from 0xe0 -0xef */ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0, AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1, AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8, -- cgit From e55a5c9b5f5b80275a38293ac0fd38336dd2efdf Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 16 Oct 2018 10:04:08 +0200 Subject: drm/ttm: Rename ttm_bo_global_{init,release}() to ttm_bo_global_ref_{,}() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The functions ttm_bo_global_init() and ttm_bo_global_release() do not receive an argument of type struct ttm_bo_global. Both take a struct drm_global_reference that contains points to a struct ttm_bo_global_ref. Renaming them reflects this. Signed-off-by: Thomas Zimmermann Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index a44fc12ae1f9..3a6802846698 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -125,8 +125,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) global_ref = &adev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); - global_ref->init = &ttm_bo_global_init; - global_ref->release = &ttm_bo_global_release; + global_ref->init = &ttm_bo_global_ref_init; + global_ref->release = &ttm_bo_global_ref_release; r = drm_global_item_ref(global_ref); if (r) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); -- cgit From 51235849d99556fe9929735625d134b3b6acbf4a Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 26 Sep 2018 19:56:41 +0200 Subject: drm/amdgpu: fix sdma v4 startup under SRIOV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Under SRIOV we were enabling the ring buffer before it was initialized. 
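For reference, the reshaped sdma_v4_0_start() after this patch is sketched below, reconstructed from the diff that follows (whitespace approximated). sdma_v4_0_gfx_resume() now handles a single instance, and under SRIOV the context switch and engines are only re-enabled once every ring buffer has been programmed, with the ring tests moved out of the resume path:

	static int sdma_v4_0_start(struct amdgpu_device *adev)
	{
		struct amdgpu_ring *ring;
		int i, r;

		if (amdgpu_sriov_vf(adev)) {
			/* host owns microcode load; just halt everything first */
			sdma_v4_0_ctx_switch_enable(adev, false);
			sdma_v4_0_enable(adev, false);
		} else {
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				r = sdma_v4_0_load_microcode(adev);
				if (r)
					return r;
			}

			/* unhalt the MEs */
			sdma_v4_0_enable(adev, true);
			/* enable sdma ring preemption */
			sdma_v4_0_ctx_switch_enable(adev, true);
		}

		/* program every instance's ring buffer before enabling anything under SRIOV */
		for (i = 0; i < adev->sdma.num_instances; i++)
			sdma_v4_0_gfx_resume(adev, i);

		if (amdgpu_sriov_vf(adev)) {
			sdma_v4_0_ctx_switch_enable(adev, true);
			sdma_v4_0_enable(adev, true);
		} else {
			r = sdma_v4_0_rlc_resume(adev);
			if (r)
				return r;
		}

		/* only now run the ring tests and hook up the buffer funcs */
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;

			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				return r;
			}

			if (adev->mman.buffer_funcs_ring == ring)
				amdgpu_ttm_set_buffer_funcs_status(adev, true);
		}

		return r;
	}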
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 234 ++++++++++++++++----------------- 1 file changed, 116 insertions(+), 118 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 7a8c9172d30a..fde27d8bfeb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -675,13 +675,14 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) * sdma_v4_0_gfx_resume - setup and start the async dma engines * * @adev: amdgpu_device pointer + * @i: instance to resume * * Set up the gfx DMA ring buffers and enable them (VEGA10). * Returns 0 for success, error for failure. */ -static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) +static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) { - struct amdgpu_ring *ring; + struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; u32 wb_offset; @@ -689,129 +690,108 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) u32 doorbell_offset; u32 temp; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - wb_offset = (ring->rptr_offs * 4); + wb_offset = (ring->rptr_offs * 4); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - /* Initialize the ring buffer's read and write pointers */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + /* Initialize the ring buffer's read and write pointers */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); - /* set the wb address whether it's enabled or not */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(adev->wb.gpu_addr + wb_offset) & 
0xFFFFFFFC); + /* set the wb address whether it's enabled or not */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); - ring->wptr = 0; + ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + } - doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index); - - if (amdgpu_sriov_vf(adev)) - sdma_v4_0_ring_set_wptr(ring); - - /* set minor_ptr_update to 0 after wptr programed */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, 
ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index); - /* setup the wptr shadow polling */ - wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - if (amdgpu_sriov_vf(adev)) - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); - else - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); + if (amdgpu_sriov_vf(adev)) + sdma_v4_0_ring_set_wptr(ring); - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* set minor_ptr_update to 0 after wptr programed */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); -#ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); -#endif - /* enable DMA IBs */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - ring->ready = true; + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v4_0_ctx_switch_enable(adev, true); - sdma_v4_0_enable(adev, true); - } + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + if (amdgpu_sriov_vf(adev)) + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); + else + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - return r; - } + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - if (adev->mman.buffer_funcs_ring == ring) - 
amdgpu_ttm_set_buffer_funcs_status(adev, true); - - } + ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - return 0; + ring->ready = true; } static void @@ -943,33 +923,51 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) */ static int sdma_v4_0_start(struct amdgpu_device *adev) { - int r = 0; + struct amdgpu_ring *ring; + int i, r; if (amdgpu_sriov_vf(adev)) { sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); + } else { - /* set RB registers */ - r = sdma_v4_0_gfx_resume(adev); - return r; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + r = sdma_v4_0_load_microcode(adev); + if (r) + return r; + } + + /* unhalt the MEs */ + sdma_v4_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v4_0_ctx_switch_enable(adev, true); } - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - r = sdma_v4_0_load_microcode(adev); + /* start the gfx rings and rlc compute queues */ + for (i = 0; i < adev->sdma.num_instances; i++) + sdma_v4_0_gfx_resume(adev, i); + + if (amdgpu_sriov_vf(adev)) { + sdma_v4_0_ctx_switch_enable(adev, true); + sdma_v4_0_enable(adev, true); + } else { + r = sdma_v4_0_rlc_resume(adev); if (r) return r; } - /* unhalt the MEs */ - sdma_v4_0_enable(adev, true); - /* enable sdma ring preemption */ - sdma_v4_0_ctx_switch_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; - /* start the gfx rings and rlc compute queues */ - r = sdma_v4_0_gfx_resume(adev); - if (r) - return r; - r = sdma_v4_0_rlc_resume(adev); + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + } return r; } -- cgit From 9194a339034f1927b28fbc8a5cff50b117142945 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Oct 2018 16:22:41 +0200 Subject: drm/amdgpu: add basics for SDMA page queue support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just the common helper and a new ring in the SDMA instance. 
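With this change each SDMA instance carries a second ring for the paging queue, and the instance lookup matches either ring. A condensed view of the result, reconstructed from the hunks below; the declaration of adev from ring->adev is assumed from surrounding context that is not part of the diff:

	struct amdgpu_sdma_instance {
		/* ... firmware fields ... */
		struct amdgpu_ring	ring;
		struct amdgpu_ring	page;	/* new: paging queue ring */
		bool			burst_nop;
	};

	struct amdgpu_sdma_instance *amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;	/* assumed from context */
		int i;

		for (i = 0; i < adev->sdma.num_instances; i++)
			if (ring == &adev->sdma.instance[i].ring ||
			    ring == &adev->sdma.instance[i].page)
				return &adev->sdma.instance[i];

		return NULL;
	}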
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 10 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1 + 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index bc9244b429ef..0fb9907494bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -34,11 +34,9 @@ struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) int i; for (i = 0; i < adev->sdma.num_instances; i++) - if (&adev->sdma.instance[i].ring == ring) - break; + if (ring == &adev->sdma.instance[i].ring || + ring == &adev->sdma.instance[i].page) + return &adev->sdma.instance[i]; - if (i < AMDGPU_MAX_SDMA_INSTANCES) - return &adev->sdma.instance[i]; - else - return NULL; + return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 500113ec65ca..556db42edaed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -41,6 +41,7 @@ struct amdgpu_sdma_instance { uint32_t feature_version; struct amdgpu_ring ring; + struct amdgpu_ring page; bool burst_nop; }; -- cgit From 09f0b4ffd41e142f52f70640afd1b4b20288d627 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Oct 2018 17:59:50 +0200 Subject: drm/amdgpu: remove non gfx specific handling from sdma_v4_0_gfx_resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Needed to start using the paging queue. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 36 +++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index fde27d8bfeb2..49c8feb14d10 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -688,13 +688,10 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) u32 wb_offset; u32 doorbell; u32 doorbell_offset; - u32 temp; u64 wptr_gpu_addr; wb_offset = (ring->rptr_offs * 4); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); @@ -754,18 +751,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) /* set minor_ptr_update to 0 after wptr programed */ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } - /* setup the wptr shadow polling */ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), @@ -944,9 +929,28 @@ static int sdma_v4_0_start(struct 
amdgpu_device *adev) } /* start the gfx rings and rlc compute queues */ - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { + uint32_t temp; + + WREG32(sdma_v4_0_get_reg_offset(adev, i, + mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); sdma_v4_0_gfx_resume(adev, i); + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, + mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v4_0_get_reg_offset(adev, i, + mmSDMA0_F32_CNTL), temp); + } + } + if (amdgpu_sriov_vf(adev)) { sdma_v4_0_ctx_switch_enable(adev, true); sdma_v4_0_enable(adev, true); -- cgit From 998d3fd4f8ce5714449237c2f93652e95ad476f4 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Oct 2018 19:31:27 +0200 Subject: drm/amdgpu: remove SRIOV specific handling from sdma_v4_0_gfx_resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just use the same code path for both SRIOV and bare metal. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 49c8feb14d10..aa5bb9cfa738 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -725,11 +725,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) /* before programing wptr to a less value, need set minor_ptr_update first */ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); - } - doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); @@ -745,8 +740,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index); - if (amdgpu_sriov_vf(adev)) - sdma_v4_0_ring_set_wptr(ring); + sdma_v4_0_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); -- cgit From d425e7d8de539933521be5e15275825957732c27 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Oct 2018 19:56:24 +0200 Subject: drm/amdgpu: add some [WR]REG32_SDMA macros to sdma_v4_0.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Significantly shortens the code. 
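The two helpers fold the per-instance register offset lookup into the access itself; note that they rely on a local adev pointer being in scope at every call site. A typical before/after pair taken from the hunks below:

	#define WREG32_SDMA(instance, offset, value) \
		WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
	#define RREG32_SDMA(instance, offset) \
		RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))

	/* before */
	rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

	/* after */
	rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
	WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);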
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 126 ++++++++++++++++----------------- 1 file changed, 63 insertions(+), 63 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index aa5bb9cfa738..bab4d499be75 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L +#define WREG32_SDMA(instance, offset, value) \ + WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value) +#define RREG32_SDMA(instance, offset) \ + RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset))) + static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -369,8 +374,8 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) } else { u32 lowbit, highbit; - lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; - highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; + lowbit = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR) >> 2; + highbit = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI) >> 2; DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", ring->me, highbit, lowbit); @@ -417,8 +422,10 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), ring->me, upper_32_bits(ring->wptr << 2)); - WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR, + lower_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI, + upper_32_bits(ring->wptr << 2)); } } @@ -568,12 +575,12 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); } sdma0->ready = false; @@ -630,18 +637,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) } for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, enable ? 
1 : 0); if (enable && amdgpu_sdma_phase_quantum) { - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), - phase_quantum); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), - phase_quantum); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), - phase_quantum); + WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum); + WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum); + WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum); } - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl); } } @@ -665,9 +669,9 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) } for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl); } } @@ -694,39 +698,39 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); /* Initialize the ring buffer's read and write pointers */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0); + WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0); + WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0); + WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0); /* set the wb address whether it's enabled or not */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI, upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO, lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40); ring->wptr = 0; /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1); - doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL); + doorbell_offset = RREG32_SDMA(i, 
mmSDMA0_GFX_DOORBELL_OFFSET); if (ring->use_doorbell) { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); @@ -735,40 +739,40 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) } else { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); } - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell); + WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset); adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index); sdma_v4_0_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0); /* setup the wptr shadow polling */ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); if (amdgpu_sriov_vf(adev)) wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); else wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); + WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); - ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif /* enable DMA IBs */ - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); ring->ready = true; } @@ -881,12 +885,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0); + WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0); for (j = 0; j < fw_size; j++) - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); + WREG32_SDMA(i, mmSDMA0_UCODE_DATA, + le32_to_cpup(fw_data++)); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version); + WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, + adev->sdma.instance[i].fw_version); } return 0; @@ -926,22 +932,19 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { uint32_t temp; - WREG32(sdma_v4_0_get_reg_offset(adev, i, - mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0); sdma_v4_0_gfx_resume(adev, i); /* set utc 
l1 enable flag always to 1 */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = RREG32_SDMA(i, mmSDMA0_CNTL); temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + WREG32_SDMA(i, mmSDMA0_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { /* unhalt engine */ - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, - mmSDMA0_F32_CNTL)); + temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v4_0_get_reg_offset(adev, i, - mmSDMA0_F32_CNTL), temp); + WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp); } } @@ -1410,7 +1413,7 @@ static bool sdma_v4_0_is_idle(void *handle) u32 i; for (i = 0; i < adev->sdma.num_instances; i++) { - u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG)); + u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG); if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) return false; @@ -1426,8 +1429,8 @@ static int sdma_v4_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); - sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG)); + sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG); + sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG); if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) return 0; @@ -1448,16 +1451,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { + unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1; u32 sdma_cntl; - u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ? - sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : - sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); - - sdma_cntl = RREG32(reg_offset); + sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL); sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl); return 0; } -- cgit From bb97ab42ac5db1ee685bd5e79fdad29e083bfd61 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 8 Oct 2018 14:38:22 +0200 Subject: drm/amdgpu: activate paging queue on SDMA v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement all the necessary stuff to get those extra rings working. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 324 ++++++++++++++++++++++++++++----- 1 file changed, 274 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index bab4d499be75..2469deb57b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -429,6 +429,57 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) } } +/** + * sdma_v4_0_page_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware (VEGA10+). 
+ */ +static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + } else { + wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI); + wptr = wptr << 32; + wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR); + } + + return wptr >> 2; +} + +/** + * sdma_v4_0_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware (VEGA10+). + */ +static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + + /* XXX check if swapping is necessary on BE */ + WRITE_ONCE(*wb, (ring->wptr << 2)); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + uint64_t wptr = ring->wptr << 2; + + WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR, + lower_32_bits(wptr)); + WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI, + upper_32_bits(wptr)); + } +} + static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); @@ -599,6 +650,35 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) /* XXX todo */ } +/** + * sdma_v4_0_page_stop - stop the page async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the page async dma ring buffers (VEGA10). + */ +static void sdma_v4_0_page_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page; + struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page; + u32 rb_cntl, ib_cntl; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, + RB_ENABLE, 0); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, + IB_ENABLE, 0); + WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); + } + + sdma0->ready = false; + sdma1->ready = false; +} + /** * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch * @@ -666,6 +746,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) if (enable == false) { sdma_v4_0_gfx_stop(adev); sdma_v4_0_rlc_stop(adev); + sdma_v4_0_page_stop(adev); } for (i = 0; i < adev->sdma.num_instances; i++) { @@ -675,6 +756,23 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) } } +/** + * sdma_v4_0_rb_cntl - get parameters for rb_cntl + */ +static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) +{ + /* Set ring buffer size in dwords */ + uint32_t rb_bufsz = order_base_2(ring->ring_size / 4); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); +#ifdef __BIG_ENDIAN + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); +#endif + return rb_cntl; +} + /** * sdma_v4_0_gfx_resume - setup and start the async dma engines * @@ -688,7 +786,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) { struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; u32 rb_cntl, ib_cntl, wptr_poll_cntl; - u32 rb_bufsz; u32 wb_offset; u32 doorbell; u32 doorbell_offset; @@ -696,15 +793,8 @@ static void 
sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) wb_offset = (ring->rptr_offs * 4); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); -#ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); -#endif + rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); /* Initialize the ring buffer's read and write pointers */ @@ -719,7 +809,8 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO, lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8); WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40); @@ -732,13 +823,11 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL); doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell); WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset); adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, @@ -756,10 +845,9 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); - if (amdgpu_sriov_vf(adev)) - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); - else - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ @@ -777,6 +865,99 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) ring->ready = true; } +/** + * sdma_v4_0_page_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * @i: instance to resume + * + * Set up the page DMA ring buffers and enable them (VEGA10). + * Returns 0 for success, error for failure. 
+ */ +static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) +{ + struct amdgpu_ring *ring = &adev->sdma.instance[i].page; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; + u32 wb_offset; + u32 doorbell; + u32 doorbell_offset; + u64 wptr_gpu_addr; + + wb_offset = (ring->rptr_offs * 4); + + rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); + rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0); + + /* set the wb address whether it's enabled or not */ + WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI, + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40); + + ring->wptr = 0; + + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1); + + doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL); + doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET); + + doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA0_PAGE_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell); + WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset); + /* TODO: enable doorbell support */ + /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index);*/ + + sdma_v4_0_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0); + + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_PAGE_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1); + WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); + + ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); + + ring->ready = true; +} + static void sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable) { @@ -934,6 +1115,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0); sdma_v4_0_gfx_resume(adev, i); + sdma_v4_0_page_resume(adev, i); /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, mmSDMA0_CNTL); @@ -1339,6 +1521,19 @@ static int sdma_v4_0_sw_init(void *handle) AMDGPU_SDMA_IRQ_TRAP1); if (r) return r; + + ring = 
&adev->sdma.instance[i].page; + ring->ring_obj = NULL; + ring->use_doorbell = false; + + sprintf(ring->name, "page%d", i); + r = amdgpu_ring_init(adev, ring, 1024, + &adev->sdma.trap_irq, + (i == 0) ? + AMDGPU_SDMA_IRQ_TRAP0 : + AMDGPU_SDMA_IRQ_TRAP1); + if (r) + return r; } return r; @@ -1349,8 +1544,10 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_ring_fini(&adev->sdma.instance[i].page); + } for (i = 0; i < adev->sdma.num_instances; i++) { release_firmware(adev->sdma.instance[i].fw); @@ -1466,39 +1663,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t instance; + DRM_DEBUG("IH: SDMA trap\n"); switch (entry->client_id) { case SOC15_IH_CLIENTID_SDMA0: - switch (entry->ring_id) { - case 0: - amdgpu_fence_process(&adev->sdma.instance[0].ring); - break; - case 1: - /* XXX compute */ - break; - case 2: - /* XXX compute */ - break; - case 3: - /* XXX page queue*/ - break; - } + instance = 0; break; case SOC15_IH_CLIENTID_SDMA1: - switch (entry->ring_id) { - case 0: - amdgpu_fence_process(&adev->sdma.instance[1].ring); - break; - case 1: - /* XXX compute */ - break; - case 2: - /* XXX compute */ - break; - case 3: - /* XXX page queue*/ - break; - } + instance = 1; + break; + default: + return 0; + } + + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[instance].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + case 3: + amdgpu_fence_process(&adev->sdma.instance[instance].page); break; } return 0; @@ -1726,6 +1916,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; +static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_MMHUB, + .get_rptr = sdma_v4_0_ring_get_rptr, + .get_wptr = sdma_v4_0_page_ring_get_wptr, + .set_wptr = sdma_v4_0_page_ring_set_wptr, + .emit_frame_size = + 6 + /* sdma_v4_0_ring_emit_hdp_flush */ + 3 + /* hdp invalidate */ + 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ + /* sdma_v4_0_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ + .emit_ib = sdma_v4_0_ring_emit_ib, + .emit_fence = sdma_v4_0_ring_emit_fence, + .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, + .test_ring = sdma_v4_0_ring_test_ring, + .test_ib = sdma_v4_0_ring_test_ib, + .insert_nop = sdma_v4_0_ring_insert_nop, + .pad_ib = sdma_v4_0_ring_pad_ib, + .emit_wreg = sdma_v4_0_ring_emit_wreg, + .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) { int i; @@ -1733,6 +1955,8 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; 
adev->sdma.instance[i].ring.me = i; + adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; + adev->sdma.instance[i].page.me = i; } } -- cgit From 161d0711b9e53afa6db7724f1f5a8e2dba34221e Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 8 Oct 2018 15:31:18 +0200 Subject: drm/amdgpu: use paging queue for VM page table updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only for testing, not sure if we should keep it like this. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2469deb57b49..8c4877faa7aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2056,7 +2056,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; + sched = &adev->sdma.instance[i].page.sched; adev->vm_manager.vm_pte_rqs[i] = &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } -- cgit From 2a85e816ccd7a8f696c70a109c98cc9a017d683f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 Oct 2018 11:39:27 -0500 Subject: drm/amdgpu/sdma4: APUs do not have a page queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't use the paging queue on APUs. Tested-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1 + drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 53 ++++++++++++++++++++------------ 2 files changed, 34 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 556db42edaed..479a2459e558 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -51,6 +51,7 @@ struct amdgpu_sdma { struct amdgpu_irq_src illegal_inst_irq; int num_instances; uint32_t srbm_soft_reset; + bool has_page_queue; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8c4877faa7aa..96857571fc62 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -746,7 +746,8 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) if (enable == false) { sdma_v4_0_gfx_stop(adev); sdma_v4_0_rlc_stop(adev); - sdma_v4_0_page_stop(adev); + if (adev->sdma.has_page_queue) + sdma_v4_0_page_stop(adev); } for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1115,7 +1116,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0); sdma_v4_0_gfx_resume(adev, i); - sdma_v4_0_page_resume(adev, i); + if (adev->sdma.has_page_queue) + sdma_v4_0_page_resume(adev, i); /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, mmSDMA0_CNTL); @@ -1457,10 +1459,13 @@ static int sdma_v4_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN) + if (adev->asic_type == CHIP_RAVEN) { adev->sdma.num_instances = 1; - else + adev->sdma.has_page_queue = false; + } else { adev->sdma.num_instances = 2; + adev->sdma.has_page_queue = true; + } 
sdma_v4_0_set_ring_funcs(adev); sdma_v4_0_set_buffer_funcs(adev); @@ -1522,18 +1527,20 @@ static int sdma_v4_0_sw_init(void *handle) if (r) return r; - ring = &adev->sdma.instance[i].page; - ring->ring_obj = NULL; - ring->use_doorbell = false; - - sprintf(ring->name, "page%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); - if (r) - return r; + if (adev->sdma.has_page_queue) { + ring = &adev->sdma.instance[i].page; + ring->ring_obj = NULL; + ring->use_doorbell = false; + + sprintf(ring->name, "page%d", i); + r = amdgpu_ring_init(adev, ring, 1024, + &adev->sdma.trap_irq, + (i == 0) ? + AMDGPU_SDMA_IRQ_TRAP0 : + AMDGPU_SDMA_IRQ_TRAP1); + if (r) + return r; + } } return r; @@ -1546,7 +1553,8 @@ static int sdma_v4_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); - amdgpu_ring_fini(&adev->sdma.instance[i].page); + if (adev->sdma.has_page_queue) + amdgpu_ring_fini(&adev->sdma.instance[i].page); } for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1955,8 +1963,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = i; - adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; - adev->sdma.instance[i].page.me = i; + if (adev->sdma.has_page_queue) { + adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; + adev->sdma.instance[i].page.me = i; + } } } @@ -2056,7 +2066,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].page.sched; + if (adev->sdma.has_page_queue) + sched = &adev->sdma.instance[i].page.sched; + else + sched = &adev->sdma.instance[i].ring.sched; adev->vm_manager.vm_pte_rqs[i] = &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } -- cgit From f783160c273485af492874b227f60e4e189106f7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Oct 2018 10:33:10 +0800 Subject: drm/amdgpu: disable SDMA page queue on Vega20 Since we see driver loading failure on Vega20. Keep it disabled until it's ready. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 96857571fc62..6367a8133168 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1464,7 +1464,8 @@ static int sdma_v4_0_early_init(void *handle) adev->sdma.has_page_queue = false; } else { adev->sdma.num_instances = 2; - adev->sdma.has_page_queue = true; + if (adev->asic_type != CHIP_VEGA20) + adev->sdma.has_page_queue = true; } sdma_v4_0_set_ring_funcs(adev); -- cgit From d7f625e91b326c39413fa68ec4b8b2f280fd87b3 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 18 Oct 2018 18:11:45 +0800 Subject: drm/amdgpu: add ring test for page queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We add page queue for sdma to update page table. So here it also needs ring test to verify it workable during the initialization. 
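Condensed from the hunk below, the test is added inside the existing per-instance loop in sdma_v4_0_start() and reuses the loop-local ring pointer:

	if (adev->sdma.has_page_queue) {
		ring = &adev->sdma.instance[i].page;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if (adev->mman.buffer_funcs_ring == ring)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);

Because ring now points at the page queue when the test passes, the buffer_funcs check that follows no longer matches the gfx ring; the later patch in this series, "drm/amdgpu: fix sdma v4 ring is disabled accidently", restores that by using a dedicated local for the page ring.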
Signed-off-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6367a8133168..dc0e8053a477 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1150,6 +1150,15 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) return r; } + if (adev->sdma.has_page_queue) { + ring = &adev->sdma.instance[i].page; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + } + if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); } -- cgit From efcf063f8d85ffd7d9f9ae4a19d185f48e57a7f0 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 19 Oct 2018 14:58:28 -0400 Subject: drm/amdgpu: fix sdma v4 ring is disabled accidently For sdma v4, there is bug caused by commit d4e869b6b5d6 ("drm/amdgpu: add ring test for page queue")' local variable ring is reused and changed, so amdgpu_ttm_set_buffer_funcs_status(adev, true) is skipped accidently. As a result, amdgpu_fill_buffer() will fail, kernel message: [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ 25.260444] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ 25.260627] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ 25.290119] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ 25.290370] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ 25.319971] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. 
[ 25.320486] amdgpu 0000:19:00.0: [mmhub] VMC page fault (src_id:0 ring:154 vmid:8 pasid:32768, for process pid 0 thread pid 0) [ 25.320533] amdgpu 0000:19:00.0: in page starting at address 0x0000000000000000 from 18 [ 25.320563] amdgpu 0000:19:00.0: VM_L2_PROTECTION_FAULT_STATUS:0x00800134 Signed-off-by: Philip Yang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index dc0e8053a477..c0d1650d6c71 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1151,10 +1151,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) } if (adev->sdma.has_page_queue) { - ring = &adev->sdma.instance[i].page; - r = amdgpu_ring_test_ring(ring); + struct amdgpu_ring *page = &adev->sdma.instance[i].page; + + r = amdgpu_ring_test_ring(page); if (r) { - ring->ready = false; + page->ready = false; return r; } } -- cgit From 548f2ecc33b5a377d9d5ba9d69cc06722f7930e1 Mon Sep 17 00:00:00 2001 From: John Clements Date: Tue, 16 Oct 2018 18:47:54 -0400 Subject: drm/amdgpu: Revised PSP comments Revised comments in PSP SOS/Sysdriver loading sequence Signed-off-by: John Clements Signed-off-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 3f3fac2d50cd..2372f4220ecb 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -167,7 +167,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) /* Copy PSP System Driver binary to memory */ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); - /* Provide the sys driver to bootrom */ + /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = 1 << 16; @@ -208,7 +208,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) /* Copy Secure OS binary to PSP memory */ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); - /* Provide the PSP secure OS to bootrom */ + /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = 2 << 16; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index e1ebf770c303..9cea0bbe4525 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) /* Copy PSP System Driver binary to memory */ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); - /* Provide the sys driver to bootrom */ + /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = 1 << 16; @@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* Copy Secure OS binary to PSP memory */ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); - /* Provide the PSP secure OS to bootrom */ + /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, 
mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = 2 << 16; -- cgit From 2c498d1dbe2e26d62a5ace8fa7b92ac2d084a60c Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 12 Oct 2018 16:53:51 +0200 Subject: drm/amdgpu: remove illegal instruction stub from si_dma.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was never used. Signed-off-by: Christian König Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si_dma.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index adbaea6da0d7..d9b27d7017dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -658,15 +658,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev, return 0; } -static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); - return 0; -} - static int si_dma_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -781,15 +772,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { .process = si_dma_process_trap_irq, }; -static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = { - .process = si_dma_process_illegal_inst_irq, -}; - static void si_dma_set_irq_funcs(struct amdgpu_device *adev) { adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; - adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs; } /** -- cgit From 898c2cb5d94fc56d357f38ba7a05b1e0e23e44a3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 16 Oct 2018 13:08:21 +0200 Subject: drm/amdgpu: use scheduler fault instead of reset work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signal a fault to the scheduler on an illegal instruction or register access violation instead of kicking off the reset handler directly.
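As an illustration of the pattern applied in the hunks below, a minimal sketch of such an interrupt handler could look like the following (illustrative only, not part of the patch; the instance decoding mirrors the SDMA handlers in this series):

static int example_sdma_illegal_inst_irq(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
{
	/* decode which SDMA instance raised the fault from the IV entry */
	u8 instance_id = entry->ring_id & 0x3;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");

	/* report the fault to that ring's GPU scheduler instead of
	 * scheduling adev->reset_work directly
	 */
	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}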
Signed-off-by: Christian König Acked-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 21 --------------------- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 23 +++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 28 ++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 31 +++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 31 +++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 8 +++++++- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 8 +++++++- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 21 +++++++++++++++++++-- 10 files changed, 142 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index fb922a872a80..9348eb5a3c83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -830,7 +830,6 @@ struct amdgpu_device { bool need_dma32; bool need_swiotlb; bool accel_working; - struct work_struct reset_work; struct notifier_block acpi_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 52c17f6219a7..6b6524f04ce0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -93,23 +93,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) drm_helper_hpd_irq_event(dev); } -/** - * amdgpu_irq_reset_work_func - execute GPU reset - * - * @work: work struct pointer - * - * Execute scheduled GPU reset (Cayman+). - * This function is called when the IRQ handler thinks we need a GPU reset. 
- */ -static void amdgpu_irq_reset_work_func(struct work_struct *work) -{ - struct amdgpu_device *adev = container_of(work, struct amdgpu_device, - reset_work); - - if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev)) - amdgpu_device_gpu_recover(adev, NULL); -} - /** * amdgpu_irq_disable_all - disable *all* interrupts * @@ -262,15 +245,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev) amdgpu_hotplug_work_func); } - INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func); - adev->irq.installed = true; r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); if (r) { adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); - cancel_work_sync(&adev->reset_work); return r; } adev->ddev->max_vblank_count = 0x00ffffff; @@ -299,7 +279,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) pci_disable_msi(adev->pdev); if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); - cancel_work_sync(&adev->reset_work); } for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index b918c8886b75..32eb43d165f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -1214,8 +1214,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + u8 instance_id; + DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); + instance_id = (entry->ring_id & 0x3) >> 0; + drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d76eb27945dc..622dd70f310e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3393,12 +3393,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v6_0_fault(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + struct amdgpu_ring *ring; + + switch (entry->ring_id) { + case 0: + ring = &adev->gfx.gfx_ring[0]; + break; + case 1: + case 2: + ring = &adev->gfx.compute_ring[entry->ring_id - 1]; + break; + default: + return; + } + drm_sched_fault(&ring->sched); +} + static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal register access in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v6_0_fault(adev, entry); return 0; } @@ -3407,7 +3426,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v6_0_fault(adev, entry); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0e72bc09939a..9fadb32da827 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4959,12 +4959,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v7_0_fault(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + struct amdgpu_ring *ring; + u8 me_id, pipe_id; + int i; + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + switch (me_id) { + case 0: + drm_sched_fault(&adev->gfx.gfx_ring[0].sched); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = 
&adev->gfx.compute_ring[i]; + if ((ring->me == me_id) && (ring->pipe == pipe_id)) + drm_sched_fault(&ring->sched); + } + break; + } +} + static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal register access in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v7_0_fault(adev, entry); return 0; } @@ -4974,7 +4998,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, { DRM_ERROR("Illegal instruction in command stream\n"); // XXX soft reset the gfx block only - schedule_work(&adev->reset_work); + gfx_v7_0_fault(adev, entry); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 617b0c8908a3..ba614f26f553 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6738,12 +6738,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v8_0_fault(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + int i; + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + drm_sched_fault(&adev->gfx.gfx_ring[0].sched); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + } +} + static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal register access in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v8_0_fault(adev, entry); return 0; } @@ -6752,7 +6779,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v8_0_fault(adev, entry); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6d7baf59d6e1..0ce1e14099bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4695,12 +4695,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_fault(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + int i; + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + drm_sched_fault(&adev->gfx.gfx_ring[0].sched); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + } +} + static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal register access in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v9_0_fault(adev, entry); return 0; } @@ -4709,7 +4736,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in command stream\n"); - schedule_work(&adev->reset_work); + gfx_v9_0_fault(adev, entry); return 0; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 2d4770e173dd..bedbd5f296c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -1105,8 +1105,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + u8 instance_id, queue_id; + DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + + if (instance_id <= 1 && queue_id == 0) + drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 6fb3edaba0ec..415968dc6c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1440,8 +1440,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + u8 instance_id, queue_id; + DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + + if (instance_id <= 1 && queue_id == 0) + drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index c0d1650d6c71..88d93430dfb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1717,12 +1717,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + int instance; + DRM_ERROR("Illegal instruction in SDMA command stream\n"); - schedule_work(&adev->reset_work); + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_SDMA0: + instance = 0; + break; + case SOC15_IH_CLIENTID_SDMA1: + instance = 1; + break; + default: + return 0; + } + + switch (entry->ring_id) { + case 0: + drm_sched_fault(&adev->sdma.instance[instance].ring.sched); + break; + } return 0; } - static void sdma_v4_0_update_medium_grain_clock_gating( struct amdgpu_device *adev, bool enable) -- cgit From b44da694a74a07071f9c8b50b4fcdfd47b52ae86 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Fri, 5 Oct 2018 16:52:39 -0400 Subject: drm/amdgpu: put HQD EOP buffers into VRAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This increases performance of compute queues. EOP events (PKT3_RELEASE_MEM) are stored into these buffers. 
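For reference, the allocation whose placement changes has the following shape (a sketch distilled from the hunks below; r, mec_hpd_size and hpd come from the surrounding *_mec_init() functions):

	/* keep the HQD EOP buffer in VRAM rather than GTT */
	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);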
Signed-off-by: Marek Olšák Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 9fadb32da827..cfa45d996482 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2781,7 +2781,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) * GFX7_MEC_HPD_SIZE * 2; r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ba614f26f553..656cc037d1ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1443,7 +1443,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0ce1e14099bc..17459796bc74 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1264,7 +1264,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); -- cgit From 17e4bd6cb888838fa8be6262ab47a201fcccbcd7 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Fri, 5 Oct 2018 16:43:44 -0400 Subject: drm/amdgpu: increase the size of HQD EOP buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marek Olšák Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 656cc037d1ce..e0fe0c6115a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -54,7 +54,7 @@ #include "ivsrcid/ivsrcid_vislands30.h" #define GFX8_NUM_GFX_RINGS 1 -#define GFX8_MEC_HPD_SIZE 2048 +#define GFX8_MEC_HPD_SIZE 4096 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 17459796bc74..4281a37a7feb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -41,7 +41,7 @@ #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" #define GFX9_NUM_GFX_RINGS 1 -#define GFX9_MEC_HPD_SIZE 2048 +#define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L -- cgit From f4f859408cad43c4b7c1ae91d87dbe33929bc8e2 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 15:12:20 -0400 Subject: drm/amdkfd: 
Delete unnecessary register settings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Those register settings have been performed in amdgpu initialization gfxhub_v1_0_setup_vmid_config() and mmhub_v1_0_setup_vmid_config(). So no need to do it again in kfd. Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 26 ----------------------- 1 file changed, 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 42cb4c4e0929..4b796395efab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -64,16 +64,6 @@ #define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c #define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 - -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 - #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727 #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0 #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728 @@ -1028,25 +1018,9 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * now, all processes share the same address space size, like * on GFX8 and older. 
*/ - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); - - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), - upper_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), - upper_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); } -- cgit From c7ff7be62bc25864dd4571763a8ba76d6270e6ce Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 15:22:46 -0400 Subject: drm/amdgpu: Expose *_setup_vm_pt_regs for kfd to use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kfd has the same need to set the VM page table base register, so expose them for kfd to use for better maintainability. 
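As a sketch of the intended usage (assuming the caller already knows the vmid and the page table base address, as the kfd patch later in this series does):

	/* program the per-VMID page table base on both the GFX and MM hubs */
	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);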
Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 19 ++++++++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 19 ++++++++++++------- 3 files changed, 30 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ceb7847b504f..6a48cad0fb19 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24; } -static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) +void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ + int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - gfxhub_v1_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index b030ca5ea107..1fd178a65e66 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -27,4 +27,10 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; +/* amdgpu_amdkfd*.c */ +void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); +void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index fd23ba1226a5..3881a42e780c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) return base; } -static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) +void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ + int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - 
upper_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - mmhub_v1_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); -- cgit From 1728801409e4bf11dea101408774bede2ffb720c Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 15:27:39 -0400 Subject: drm/amdkfd: Use functions from amdgpu for setting up page table base MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the functions from amdgpu to avoid directly programming registers in amdgpu_amdkfd_gfx_v9.c. Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 4b796395efab..223bbc1adfac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,6 +46,7 @@ #include "v9_structs.h" #include "soc15.h" #include "soc15d.h" +#include "gmc_v9_0.h" /* HACK: MMHUB and GC both have VM-related register with the same * names but different offsets. Define the MMHUB register we need here @@ -59,11 +60,6 @@ #define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705 #define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0 -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c -#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 - #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727 #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0 #define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728 @@ -1018,9 +1014,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * now, all processes share the same address space size, like * on GFX8 and older. */ - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + mmhub_v1_0_setup_vm_pt_regs(adev, vmid, base); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, base); } -- cgit From dfcbe6d5f4a340c51a4e13bc5181534bc4fd4697 Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Thu, 18 Oct 2018 12:51:02 -0400 Subject: drm/amdgpu: Remove unused function pointers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused function pointers in kfd2kgd structure. 
Signed-off-by: Amber Lin Acked-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 25 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 25 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 24 ---------------------- 3 files changed, 74 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 244d9834a381..72a357dae070 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -173,13 +173,6 @@ static int get_tile_config(struct kgd_dev *kgd, } static const struct kfd2kgd_calls kfd2kgd = { - .init_gtt_mem_allocation = alloc_gtt_mem, - .free_gtt_mem = free_gtt_mem, - .get_local_mem_info = get_local_mem_info, - .get_gpu_clock_counter = get_gpu_clock_counter, - .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, - .alloc_pasid = amdgpu_pasid_alloc, - .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_interrupts = kgd_init_interrupts, @@ -200,28 +193,10 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, - .get_cu_info = get_cu_info, - .get_vram_usage = amdgpu_amdkfd_get_vram_usage, - .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, - .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, - .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, - .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, - .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, - .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu, - .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu, - .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu, - .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory, - .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel, - .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos, .invalidate_tlbs = invalidate_tlbs, .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .submit_ib = amdgpu_amdkfd_submit_ib, - .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, - .gpu_recover = amdgpu_amdkfd_gpu_reset, - .set_compute_idle = amdgpu_amdkfd_set_compute_idle }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 9f149914ad6c..0e2a56b6a9b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -128,13 +128,6 @@ static int get_tile_config(struct kgd_dev *kgd, } static const struct kfd2kgd_calls kfd2kgd = { - .init_gtt_mem_allocation = alloc_gtt_mem, - .free_gtt_mem = free_gtt_mem, - .get_local_mem_info = get_local_mem_info, - .get_gpu_clock_counter = get_gpu_clock_counter, - .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, - .alloc_pasid = amdgpu_pasid_alloc, - .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, 
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_interrupts = kgd_init_interrupts, @@ -157,27 +150,9 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, - .get_cu_info = get_cu_info, - .get_vram_usage = amdgpu_amdkfd_get_vram_usage, - .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, - .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, - .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, - .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, - .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, - .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu, - .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu, - .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu, - .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory, - .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel, - .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos, .invalidate_tlbs = invalidate_tlbs, .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .submit_ib = amdgpu_amdkfd_submit_ib, - .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info, - .gpu_recover = amdgpu_amdkfd_gpu_reset, - .set_compute_idle = amdgpu_amdkfd_set_compute_idle }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 223bbc1adfac..54c369091f6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -153,13 +153,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, } static const struct kfd2kgd_calls kfd2kgd = { - .init_gtt_mem_allocation = alloc_gtt_mem, - .free_gtt_mem = free_gtt_mem, - .get_local_mem_info = get_local_mem_info, - .get_gpu_clock_counter = get_gpu_clock_counter, - .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, - .alloc_pasid = amdgpu_pasid_alloc, - .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_interrupts = kgd_init_interrupts, @@ -182,26 +175,9 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = amdgpu_amdkfd_get_tile_config, - .get_cu_info = get_cu_info, - .get_vram_usage = amdgpu_amdkfd_get_vram_usage, - .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, - .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, - .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, - .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, - .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, - .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu, - .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu, - .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu, - .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory, - .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel, - .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos, .invalidate_tlbs = invalidate_tlbs, 
.invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .submit_ib = amdgpu_amdkfd_submit_ib, - .gpu_recover = amdgpu_amdkfd_gpu_reset, - .set_compute_idle = amdgpu_amdkfd_set_compute_idle, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; -- cgit From 7cd52c917a9c9a90f7d303079576971dc1a1c4fc Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Thu, 18 Oct 2018 13:38:19 -0400 Subject: drm/amdkfd: Add proper prefix to functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add amdgpu_amdkfd_ prefix to amdgpu functions served for amdkfd usage. v2: fix indentation Signed-off-by: Amber Lin Acked-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 18 +++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 20 ++++++++++---------- 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index c31a8849e9f8..bce5f7711cf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -268,9 +268,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) amdgpu_device_gpu_recover(adev, NULL); } -int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, - void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr, bool mqd_gfx9) +int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, + void **mem_obj, uint64_t *gpu_addr, + void **cpu_ptr, bool mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -340,7 +340,7 @@ allocate_mem_reserve_bo_failed: return r; } -void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) +void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) { struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; @@ -351,8 +351,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) amdgpu_bo_unref(&(bo)); } -void get_local_mem_info(struct kgd_dev *kgd, - struct kfd_local_mem_info *mem_info) +void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, + struct kfd_local_mem_info *mem_info) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; uint64_t address_mask = adev->dev->dma_mask ? 
~*adev->dev->dma_mask : @@ -383,7 +383,7 @@ void get_local_mem_info(struct kgd_dev *kgd, mem_info->mem_clk_max = 100; } -uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; @@ -392,7 +392,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) return 0; } -uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; @@ -405,7 +405,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) return 100; } -void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) +void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_cu_info acu_info = adev->gfx.cu_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 8e0d4f7196b4..bcf587b4ba98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -134,16 +134,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev); void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); /* Shared API */ -int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, - void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr, bool mqd_gfx9); -void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); -void get_local_mem_info(struct kgd_dev *kgd, - struct kfd_local_mem_info *mem_info); -uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); - -uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); -void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); +int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, + void **mem_obj, uint64_t *gpu_addr, + void **cpu_ptr, bool mqd_gfx9); +void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); +void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, + struct kfd_local_mem_info *mem_info); +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); + +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd); +void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); -- cgit From 0b25cbf9c26c4edf2cb12b542faf94d77586c916 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Sep 2018 14:23:11 +0800 Subject: drm/amdgpu/psp: avoid hard-code fence value pre submission Hard-code submission fence is not a sustainable way as there is more and more run-time psp kernel mode submission from driver to fw Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 18 +++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5 ++++- 2 files changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 25d2f3e757f1..a18a8c91d52b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -118,21 +118,25 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, static int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, - struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr, - int index) + struct 
psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) { int ret; int index; memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); + index = atomic_inc_return(&psp->fence_value); ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, fence_mc_addr, index); + if (ret) { + atomic_dec(&psp->fence_value); + return ret; + } - while (*((unsigned int *)psp->fence_buf) != index) { + while (*((unsigned int *)psp->fence_buf) != index) msleep(1); - } /* the status field must be 0 after FW is loaded */ if (ucode && psp->cmd_buf_mem->resp.status) { @@ -191,7 +195,7 @@ static int psp_tmr_load(struct psp_context *psp) PSP_TMR_SIZE, psp->tmr_mc_addr); ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr, 1); + psp->fence_buf_mc_addr); if (ret) goto failed; @@ -258,7 +262,7 @@ static int psp_asd_load(struct psp_context *psp) psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr, 2); + psp->fence_buf_mc_addr); kfree(cmd); @@ -321,7 +325,7 @@ static int psp_np_fw_load(struct psp_context *psp) return ret; ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, - psp->fence_buf_mc_addr, i + 3); + psp->fence_buf_mc_addr); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 8b8720e9c3f0..5bc59bcb3097 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -96,7 +96,7 @@ struct psp_context const struct psp_funcs *funcs; - /* fence buffer */ + /* firmware buffer */ struct amdgpu_bo *fw_pri_bo; uint64_t fw_pri_mc_addr; void *fw_pri_buf; @@ -134,6 +134,9 @@ struct psp_context struct amdgpu_bo *cmd_buf_bo; uint64_t cmd_buf_mc_addr; struct psp_gfx_cmd_resp *cmd_buf_mem; + + /* fence value associated with cmd buffer */ + atomic_t fence_value; }; struct amdgpu_psp_funcs { -- cgit From 6ffb6b7f8ab7fea7f6007f68f3c31372410743c0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 22 Oct 2018 17:37:00 +0800 Subject: drm/amdgpu: Reverse the sequence of ctx_mgr_fini and vm_fini in amdgpu_driver_postclose_kms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The csa buffer is created per ctx; when the ctx is finished, the csa buffer and va are released, so ctx_mgr_fini needs to be done before vm_fini. Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 81732a84c2ab..09fa919d2500 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1048,8 +1048,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pasid = fpriv->vm.pasid; pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); - amdgpu_vm_fini(adev, &fpriv->vm); amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_vm_fini(adev, &fpriv->vm); if (pasid) amdgpu_pasid_free_delayed(pd->tbo.resv, pasid); -- cgit From 435e2f970995f7bb3fa5b986071e8371bdc75619 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 16 Oct 2018 13:12:53 -0400 Subject: drm/amdkfd: page_table_base already has the flags needed The flags are added when calling amdgpu_gmc_pd_addr().
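In other words, callers can pass the address returned by amdgpu_gmc_pd_addr() straight through; a minimal sketch based on the GART setup earlier in this series:

	/* the page directory address already carries the required flags
	 * (e.g. AMDGPU_PTE_VALID), so do not OR them in a second time
	 */
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);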
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 54c369091f6c..60b5f56ce7a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -978,7 +978,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint64_t base = page_table_base | AMDGPU_PTE_VALID; if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", @@ -990,7 +989,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * now, all processes share the same address space size, like * on GFX8 and older. */ - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, base); + mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); - gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, base); + gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } -- cgit From a614aae76057955c914de9bc3fc659c164cd5c2c Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 16:48:04 -0400 Subject: drm/amdkfd: Remove unnecessary register setting when invalidating tlb in kfd Those register settings have been done in gfxhub_v1_0_program_invalidation() and mmhub_v1_0_program_invalidation(). Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 60b5f56ce7a5..3ade5d54ea27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -60,11 +60,6 @@ #define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705 #define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0 -#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727 -#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0 -#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728 -#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0 - #define V9_PIPE_PER_MEC (4) #define V9_QUEUES_PER_PIPE_MEC (8) @@ -772,18 +767,6 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) * TODO 2: support range-based invalidation, requires kfg2kgd * interface change */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32), - 0xffffffff); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32), - 0x0000001f); - - WREG32(SOC15_REG_OFFSET(MMHUB, 0, - mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32), - 0xffffffff); - WREG32(SOC15_REG_OFFSET(MMHUB, 0, - mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32), - 0x0000001f); - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req); WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ), -- cgit From 2a79d86897afd54778d44019dcc9327e0188e69f Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 17:17:05 -0400 Subject: drm/amdgpu: Reorganize amdgpu_gmc_flush_gpu_tlb() for kfd to use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a flush_type parameter to that series of 
functions. Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 20 ++++++++++---------- 6 files changed, 22 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 11fea28f8ad3..9a212aa4c177 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } mb(); amdgpu_asic_flush_hdp(adev, NULL); - amdgpu_gmc_flush_gpu_tlb(adev, 0); + amdgpu_gmc_flush_gpu_tlb(adev, 0, 0); return 0; } @@ -331,7 +331,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, mb(); amdgpu_asic_flush_hdp(adev, NULL); - amdgpu_gmc_flush_gpu_tlb(adev, 0); + amdgpu_gmc_flush_gpu_tlb(adev, 0, 0); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 6fa7ef446e46..4c5f18cf5b69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -64,7 +64,7 @@ struct amdgpu_vmhub { struct amdgpu_gmc_funcs { /* flush the vm tlb via mmio */ void (*flush_gpu_tlb)(struct amdgpu_device *adev, - uint32_t vmid); + uint32_t vmid, uint32_t flush_type); /* flush the vm tlb via ring */ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); @@ -151,7 +151,7 @@ struct amdgpu_gmc { struct amdgpu_xgmi xgmi; }; -#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) +#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type)) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e1c2b4e9c7b2..2821d1d846e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -358,7 +358,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) return 0; } -static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid) +static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, + uint32_t vmid, uint32_t flush_type) { WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); } @@ -580,7 +581,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) else gmc_v6_0_set_fault_enable_default(adev, true); - gmc_v6_0_flush_gpu_tlb(adev, 0); + gmc_v6_0_flush_gpu_tlb(adev, 0, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 910c4ce19cb3..761dcfb2fec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -430,7 +430,8 @@ static 
int gmc_v7_0_mc_init(struct amdgpu_device *adev) * * Flush the TLB for the requested page table (CIK). */ -static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid) +static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, + uint32_t vmid, uint32_t flush_type) { /* bits 0-15 are the VM contexts0-15 */ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); @@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) WREG32(mmCHUB_CONTROL, tmp); } - gmc_v7_0_flush_gpu_tlb(adev, 0); + gmc_v7_0_flush_gpu_tlb(adev, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1d3265c97b70..531aaf377592 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -611,7 +611,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * Flush the TLB for the requested page table (CIK). */ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, - uint32_t vmid) + uint32_t vmid, uint32_t flush_type) { /* bits 0-15 are the VM contexts0-15 */ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); @@ -920,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) else gmc_v8_0_set_fault_enable_default(adev, true); - gmc_v8_0_flush_gpu_tlb(adev, 0); + gmc_v8_0_flush_gpu_tlb(adev, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f35d7a554ad5..af786b5513bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -293,14 +293,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; } -static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) +static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) { u32 req = 0; - /* invalidate using legacy mode on vmid*/ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, PER_VMID_INVALIDATE_REQ, 1 << vmid); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); @@ -362,24 +362,24 @@ failed_kiq: */ /** - * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback + * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type * * @adev: amdgpu_device pointer * @vmid: vm instance to flush + * @flush_type: the flush type * - * Flush the TLB for the requested page table. + * Flush the TLB for the requested page table using certain type. 
*/ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, - uint32_t vmid) + uint32_t vmid, uint32_t flush_type) { - /* Use register 17 for GART */ const unsigned eng = 17; unsigned i, j; int r; for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &adev->vmhub[i]; - u32 tmp = gmc_v9_0_get_invalidate_req(vmid); + u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); if (adev->gfx.kiq.ring.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && @@ -429,7 +429,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, { struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; - uint32_t req = gmc_v9_0_get_invalidate_req(vmid); + uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), @@ -1122,7 +1122,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) gfxhub_v1_0_set_fault_enable_default(adev, value); mmhub_v1_0_set_fault_enable_default(adev, value); - gmc_v9_0_flush_gpu_tlb(adev, 0); + gmc_v9_0_flush_gpu_tlb(adev, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), -- cgit From afd545f673ad40bc0d3d6ecae686ca6fc44d1743 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 17:23:09 -0400 Subject: drm/amdkfd: Use functions from amdgpu to invalidate vmid in kfd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As part of the change, we stop taking the srbm lock, and start to use the same invalidation engine and software lock as amdgpu. Signed-off-by: Yong Zhao Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 37 +---------------------- 1 file changed, 1 insertion(+), 36 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 3ade5d54ea27..f4b47065425c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -48,17 +48,6 @@ #include "soc15d.h" #include "gmc_v9_0.h" -/* HACK: MMHUB and GC both have VM-related register with the same - * names but different offsets. Define the MMHUB register we need here - * with a prefix. A proper solution would be to move the functions - * programming these registers into gfx_v9_0.c and mmhub_v1_0.c - * respectively. - */ -#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3 -#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0 - -#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705 -#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0 #define V9_PIPE_PER_MEC (4) #define V9_QUEUES_PER_PIPE_MEC (8) @@ -742,15 +731,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - uint32_t req = (1 << vmid) | - (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */ - VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK | - VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK | - VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK | - VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK | - VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK; - - mutex_lock(&adev->srbm_mutex); /* Use legacy mode tlb invalidation. 
* @@ -767,22 +747,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) * TODO 2: support range-based invalidation, requires kfg2kgd * interface change */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req); - - WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ), - req); - - while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) & - (1 << vmid))) - cpu_relax(); - - while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0, - mmMMHUB_VM_INVALIDATE_ENG16_ACK)) & - (1 << vmid))) - cpu_relax(); - - mutex_unlock(&adev->srbm_mutex); - + amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) -- cgit From 5dcb668d9a078bf926ea629db41643dd95d2d31b Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 23 Oct 2018 22:58:04 -0400 Subject: drm/amdgpu: Added a few comments for gart Signed-off-by: Oak Zeng Reviewed-by: Christian Konig Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 9a212aa4c177..6d11e1721147 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, * @offset: offset into the GPU's gart aperture * @pages: number of pages to bind * @dma_addr: DMA addresses of pages + * @flags: page table entry flags + * @dst: CPU address of the gart table * * Map the dma_addresses into GART entries (all asics). * Returns 0 for success, -EINVAL for failure. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 9ff62887e4e3..afa2e2877d87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -41,6 +41,7 @@ struct amdgpu_bo; struct amdgpu_gart { struct amdgpu_bo *bo; + /* CPU kmapped address of gart table */ void *ptr; unsigned num_gpu_pages; unsigned num_cpu_pages; -- cgit From 27eb1fa9130a98edd2b321d4dbce5c8b244ee7af Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 19 Oct 2018 13:49:05 +0200 Subject: drm/ttm: use a static ttm_mem_global instance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the name says we only need one global instance of ttm_mem_global. Drop all the driver initialization and just use a single exported instance which is initialized during BO global initialization. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 44 --------------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 - 2 files changed, 45 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3a6802846698..fda252022b15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -65,33 +65,6 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); * Global memory. */ -/** - * amdgpu_ttm_mem_global_init - Initialize and acquire reference to - * memory object - * - * @ref: Object for initialization. - * - * This is called by drm_global_item_ref() when an object is being - * initialized. 
- */ -static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) -{ - return ttm_mem_global_init(ref->object); -} - -/** - * amdgpu_ttm_mem_global_release - Drop reference to a memory object - * - * @ref: Object being removed - * - * This is called by drm_global_item_unref() when an object is being - * released. - */ -static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) -{ - ttm_mem_global_release(ref->object); -} - /** * amdgpu_ttm_global_init - Initialize global TTM memory reference structures. * @@ -108,20 +81,6 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) /* ensure reference is false in case init fails */ adev->mman.mem_global_referenced = false; - global_ref = &adev->mman.mem_global_ref; - global_ref->global_type = DRM_GLOBAL_TTM_MEM; - global_ref->size = sizeof(struct ttm_mem_global); - global_ref->init = &amdgpu_ttm_mem_global_init; - global_ref->release = &amdgpu_ttm_mem_global_release; - r = drm_global_item_ref(global_ref); - if (r) { - DRM_ERROR("Failed setting up TTM memory accounting " - "subsystem.\n"); - goto error_mem; - } - - adev->mman.bo_global_ref.mem_glob = - adev->mman.mem_global_ref.object; global_ref = &adev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); @@ -140,8 +99,6 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) return 0; error_bo: - drm_global_item_unref(&adev->mman.mem_global_ref); -error_mem: return r; } @@ -150,7 +107,6 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) if (adev->mman.mem_global_referenced) { mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); - drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index fe8f276e9811..e114f209b701 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -40,7 +40,6 @@ struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; - struct drm_global_reference mem_global_ref; struct ttm_bo_device bdev; bool mem_global_referenced; bool initialized; -- cgit From a64f784bb14a56bfdfad2dc397dd67e4564e3a29 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 19 Oct 2018 16:55:26 +0200 Subject: drm/ttm: initialize globals during device init (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that the global BO state is always correctly initialized. This allows removing all the device code to initialize it. v2: fix up vbox (Alex) Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 59 ++------------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 - 2 files changed, 2 insertions(+), 58 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fda252022b15..31fe85dd0b50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -61,56 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); -/* - * Global memory. - */ - -/** - * amdgpu_ttm_global_init - Initialize global TTM memory reference structures. 
- * - * @adev: AMDGPU device for which the global structures need to be registered. - * - * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init() - * during bring up. - */ -static int amdgpu_ttm_global_init(struct amdgpu_device *adev) -{ - struct drm_global_reference *global_ref; - int r; - - /* ensure reference is false in case init fails */ - adev->mman.mem_global_referenced = false; - - global_ref = &adev->mman.bo_global_ref.ref; - global_ref->global_type = DRM_GLOBAL_TTM_BO; - global_ref->size = sizeof(struct ttm_bo_global); - global_ref->init = &ttm_bo_global_ref_init; - global_ref->release = &ttm_bo_global_ref_release; - r = drm_global_item_ref(global_ref); - if (r) { - DRM_ERROR("Failed setting up TTM BO subsystem.\n"); - goto error_bo; - } - - mutex_init(&adev->mman.gtt_window_lock); - - adev->mman.mem_global_referenced = true; - - return 0; - -error_bo: - return r; -} - -static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) -{ - if (adev->mman.mem_global_referenced) { - mutex_destroy(&adev->mman.gtt_window_lock); - drm_global_item_unref(&adev->mman.bo_global_ref.ref); - adev->mman.mem_global_referenced = false; - } -} - static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; @@ -1714,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) int r; u64 vis_vram_limit; - /* initialize global references for vram/gtt */ - r = amdgpu_ttm_global_init(adev); - if (r) { - return r; - } + mutex_init(&adev->mman.gtt_window_lock); + /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, - adev->mman.bo_global_ref.ref.object, &amdgpu_bo_driver, adev->ddev->anon_inode->i_mapping, DRM_FILE_PAGE_OFFSET, @@ -1878,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); ttm_bo_device_release(&adev->mman.bdev); - amdgpu_ttm_global_fini(adev); adev->mman.initialized = false; DRM_INFO("amdgpu: ttm finalized\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e114f209b701..b5b2d101f7db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -39,7 +39,6 @@ #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 struct amdgpu_mman { - struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; bool mem_global_referenced; bool initialized; -- cgit From c66ed765a0a97b8900f37d4a71f1d75f52f56eeb Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 19 Oct 2018 16:22:48 -0400 Subject: drm/amdgpu: Retire amdgpu_ring.ready flag v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start using drm_gpu_scheduler.ready instead. v3: Add helper function to run ring test and set sched.ready flag status accordingly, clean explicit sched.ready sets from the IP specific files. v4: Add kerneldoc and rebase.
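In practical terms the change collapses the open-coded ready/test/unready sequence in every engine into a single call that records the result in the scheduler. A condensed before/after sketch of the caller side, distilled from the per-IP hunks below (not a literal hunk itself):

        /* old pattern, repeated in each IP block */
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        /* new pattern: the helper runs the test and mirrors the
         * outcome into ring->sched.ready itself */
        r = amdgpu_ring_test_helper(ring);
        if (r)
                return r;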
Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 6 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 18 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 22 ++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 12 ++++----- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 16 ++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 16 ++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 29 +++++++++------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 +++++++++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12 ++++----- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 12 ++++----- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 24 ++++++++---------- drivers/gpu/drm/amd/amdgpu/si_dma.c | 10 +++----- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 9 +++---- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 9 +++---- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16 ++++-------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 16 ++++-------- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 6 +---- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 7 +----- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 9 ++----- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 24 ++++++------------ 26 files changed, 129 insertions(+), 187 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bce5f7711cf1..60f9a87e9c74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) KGD_MAX_QUEUES); /* remove the KIQ bit as well */ - if (adev->gfx.kiq.ring.ready) + if (adev->gfx.kiq.ring.sched.ready) clear_bit(amdgpu_gfx_queue_to_bit(adev, adev->gfx.kiq.ring.me - 1, adev->gfx.kiq.ring.pipe, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index f4b47065425c..40a9f348b37d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -786,7 +786,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) if (adev->in_gpu_reset) return -EIO; - if (ring->ready) + if (ring->sched.ready) return invalidate_tlbs_with_kiq(adev, pasid); for (vmid = 0; vmid < 16; vmid++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index b8963b725dfa..fc74f40a5912 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, fence_ctx = 0; } - if (!ring->ready) { + if (!ring->sched.ready) { dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } @@ -351,7 +351,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->rings[i]; long tmo; - if (!ring || !ring->ready) + if (!ring || !ring->sched.ready) continue; /* skip IB tests for KIQ in general for the below reasons: @@ -375,7 +375,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) r = amdgpu_ring_test_ib(ring, tmo); if (r) { - ring->ready = false; + ring->sched.ready = false; if (ring == 
&adev->gfx.gfx_ring[0]) { /* oh, oh, that's really bad */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09fa919d2500..8f6ff9f895c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_GFX: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) - if (adev->gfx.gfx_ring[i].ready) + if (adev->gfx.gfx_ring[i].sched.ready) ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; @@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_COMPUTE: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) - if (adev->gfx.compute_ring[i].ready) + if (adev->gfx.compute_ring[i].sched.ready) ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; @@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_DMA: type = AMD_IP_BLOCK_TYPE_SDMA; for (i = 0; i < adev->sdma.num_instances; i++) - if (adev->sdma.instance[i].ring.ready) + if (adev->sdma.instance[i].ring.sched.ready) ++num_rings; ib_start_alignment = 256; ib_size_alignment = 4; @@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->uvd.harvest_config & (1 << i)) continue; - if (adev->uvd.inst[i].ring.ready) + if (adev->uvd.inst[i].ring.sched.ready) ++num_rings; } ib_start_alignment = 64; @@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < adev->vce.num_rings; i++) - if (adev->vce.ring[i].ready) + if (adev->vce.ring[i].sched.ready) ++num_rings; ib_start_alignment = 4; ib_size_alignment = 1; @@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, continue; for (j = 0; j < adev->uvd.num_enc_rings; j++) - if (adev->uvd.inst[i].ring_enc[j].ready) + if (adev->uvd.inst[i].ring_enc[j].sched.ready) ++num_rings; } ib_start_alignment = 64; @@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.ring_dec.ready) + if (adev->vcn.ring_dec.sched.ready) ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; @@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_enc_rings; i++) - if (adev->vcn.ring_enc[i].ready) + if (adev->vcn.ring_enc[i].sched.ready) ++num_rings; ib_start_alignment = 64; ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.ring_jpeg.ready) + if (adev->vcn.ring_jpeg.sched.ready) ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 59cc678de8c1..7235cd0b0fa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->ready) + if (ring && ring->sched.ready) amdgpu_fence_wait_empty(ring); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index b70e85ec147d..3c89c8aa33d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, */ void amdgpu_ring_fini(struct amdgpu_ring *ring) { - ring->ready = false; + ring->sched.ready = false; /* Not to finish a ring which is not initialized */ if (!(ring->adev) || !(ring->adev->rings[ring->idx])) @@ -500,3 +500,23 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) debugfs_remove(ring->ent); #endif } + +/** + * amdgpu_ring_test_helper - tests ring and set sched readiness status + * + * @ring: ring to try the recovery on + * + * Tests ring and set sched readiness status + * + * Returns 0 on success, error on failure. + */ +int amdgpu_ring_test_helper(struct amdgpu_ring *ring) +{ + int r; + + r = amdgpu_ring_test_ring(ring); + + ring->sched.ready = !r; + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4caa301ce454..4cdddbc4491b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -189,7 +189,6 @@ struct amdgpu_ring { uint64_t gpu_addr; uint64_t ptr_mask; uint32_t buf_mask; - bool ready; u32 idx; u32 me; u32 pipe; @@ -313,4 +312,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, ring->count_dw -= count_dw; } +int amdgpu_ring_test_helper(struct amdgpu_ring *ring); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 31fe85dd0b50..c91ec3101d00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1970,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->ready) { + if (direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 32eb43d165f2..561406a1cf88 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -316,8 +316,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); } - sdma0->ready = false; - sdma1->ready = false; + sdma0->sched.ready = false; + sdma1->sched.ready = false; } /** @@ -494,18 +494,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - ring->ready = true; + ring->sched.ready = true; } cik_sdma_enable(adev, true); for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 622dd70f310e..c8f038136af0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1950,9 +1950,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) CP_ME_CNTL__CE_HALT_MASK)); WREG32(mmSCRATCH_UMSK, 0); for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].ready = false; + adev->gfx.gfx_ring[i].sched.ready = false; for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].ready = 
false; + adev->gfx.compute_ring[i].sched.ready = false; } udelay(50); } @@ -2124,12 +2124,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the rings */ gfx_v6_0_cp_gfx_start(adev); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } return 0; } @@ -2227,14 +2224,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_RB2_CNTL, tmp); WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8); - adev->gfx.compute_ring[0].ready = false; - adev->gfx.compute_ring[1].ready = false; for (i = 0; i < 2; i++) { - r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]); + r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]); if (r) return r; - adev->gfx.compute_ring[i].ready = true; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index cfa45d996482..6de6bb18bdfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2403,7 +2403,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) } else { WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].ready = false; + adev->gfx.gfx_ring[i].sched.ready = false; } udelay(50); } @@ -2613,12 +2613,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ gfx_v7_0_cp_gfx_start(adev); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } return 0; } @@ -2675,7 +2672,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].ready = false; + adev->gfx.compute_ring[i].sched.ready = false; } udelay(50); } @@ -3106,10 +3103,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) - ring->ready = false; + amdgpu_ring_test_helper(ring); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0fe0c6115a8..02f8ca56386f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1629,7 +1629,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) return 0; /* bail if the compute ring is not ready */ - if (!ring->ready) + if (!ring->sched.ready) return 0; tmp = RREG32(mmGB_EDC_MODE); @@ -4197,7 +4197,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].ready = false; + adev->gfx.gfx_ring[i].sched.ready = false; } WREG32(mmCP_ME_CNTL, tmp); udelay(50); @@ -4379,10 +4379,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ amdgpu_ring_clear_ring(ring); gfx_v8_0_cp_gfx_start(adev); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) - ring->ready = false; + ring->sched.ready = true; + r = amdgpu_ring_test_helper(ring); return r; } @@ -4396,8 +4394,8 @@ 
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].ready = false; - adev->gfx.kiq.ring.ready = false; + adev->gfx.compute_ring[i].sched.ready = false; + adev->gfx.kiq.ring.sched.ready = false; } udelay(50); } @@ -4473,11 +4471,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); } - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { + r = amdgpu_ring_test_helper(kiq_ring); + if (r) DRM_ERROR("KCQ enable failed\n"); - kiq_ring->ready = false; - } return r; } @@ -4781,7 +4777,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->ready = true; + ring->sched.ready = true; return 0; } @@ -4820,10 +4816,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) */ for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { ring = &adev->gfx.compute_ring[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) - ring->ready = false; + r = amdgpu_ring_test_helper(ring); } done: @@ -4899,7 +4892,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); } - r = amdgpu_ring_test_ring(kiq_ring); + r = amdgpu_ring_test_helper(kiq_ring); if (r) DRM_ERROR("KCQ disable failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4281a37a7feb..d71c9c47444e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2537,7 +2537,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); if (!enable) { for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].ready = false; + adev->gfx.gfx_ring[i].sched.ready = false; } WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); udelay(50); @@ -2727,7 +2727,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ gfx_v9_0_cp_gfx_start(adev); - ring->ready = true; + ring->sched.ready = true; return 0; } @@ -2742,8 +2742,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].ready = false; - adev->gfx.kiq.ring.ready = false; + adev->gfx.compute_ring[i].sched.ready = false; + adev->gfx.kiq.ring.sched.ready = false; } udelay(50); } @@ -2866,11 +2866,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); } - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { + r = amdgpu_ring_test_helper(kiq_ring); + if (r) DRM_ERROR("KCQ enable failed\n"); - kiq_ring->ready = false; - } return r; } @@ -3249,7 +3247,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->ready = true; + ring->sched.ready = true; return 0; } @@ -3314,19 +3312,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; ring = &adev->gfx.gfx_ring[0]; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) - ring->ready = false; + amdgpu_ring_test_helper(ring); } gfx_v9_0_enable_gui_idle_interrupt(adev, true); @@ -3391,7 +3383,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); } - r = amdgpu_ring_test_ring(kiq_ring); + r = amdgpu_ring_test_helper(kiq_ring); if (r) DRM_ERROR("KCQ disable failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index af786b5513bc..a7e61c6de71c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -381,7 +381,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); - if (adev->gfx.kiq.ring.ready && + if (adev->gfx.kiq.ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && !adev->in_gpu_reset) { r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index bedbd5f296c5..fa2f6bea1d60 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -349,8 +349,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->ready = false; - sdma1->ready = false; + sdma0->sched.ready = false; + sdma1->sched.ready = false; } /** @@ -471,17 +471,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - ring->ready = true; + ring->sched.ready = 
true; } sdma_v2_4_enable(adev, true); for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 415968dc6c87..942fe3696ef0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -523,8 +523,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->ready = false; - sdma1->ready = false; + sdma0->sched.ready = false; + sdma1->sched.ready = false; } /** @@ -739,7 +739,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - ring->ready = true; + ring->sched.ready = true; } /* unhalt the MEs */ @@ -749,11 +749,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 88d93430dfb1..65312897b8ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -634,8 +634,8 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); } - sdma0->ready = false; - sdma1->ready = false; + sdma0->sched.ready = false; + sdma1->sched.ready = false; } /** @@ -675,8 +675,8 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); } - sdma0->ready = false; - sdma1->ready = false; + sdma0->sched.ready = false; + sdma1->sched.ready = false; } /** @@ -863,7 +863,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) /* enable DMA IBs */ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - ring->ready = true; + ring->sched.ready = true; } /** @@ -956,7 +956,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) /* enable DMA IBs */ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); - ring->ready = true; + ring->sched.ready = true; } static void @@ -1144,20 +1144,16 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->sdma.has_page_queue) { struct amdgpu_ring *page = &adev->sdma.instance[i].page; - r = amdgpu_ring_test_ring(page); - if (r) { - page->ready = false; + r = amdgpu_ring_test_helper(page); + if (r) return r; - } } if (adev->mman.buffer_funcs_ring == ring) diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index d9b27d7017dd..05ce1ca4c789 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -122,7 +122,7 @@ static void si_dma_stop(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring == ring) 
amdgpu_ttm_set_buffer_funcs_status(adev, false); - ring->ready = false; + ring->sched.ready = false; } } @@ -175,13 +175,11 @@ static int si_dma_start(struct amdgpu_device *adev) WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2); WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); - ring->ready = true; + ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 1fc17bf39fed..8cabe982a61d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle) uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } r = amdgpu_ring_alloc(ring, 10); if (r) { @@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle) if (RREG32(mmUVD_STATUS) != 0) uvd_v4_2_stop(adev); - ring->ready = false; + ring->sched.ready = false; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index fde6ad5ac9ab..56b02ee543f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle) uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); uvd_v5_0_enable_mgcg(adev, true); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } r = amdgpu_ring_alloc(ring, 10); if (r) { @@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle) if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); - ring->ready = false; + ring->sched.ready = false; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7a5b40275e8e..3027607a187c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -476,12 +476,9 @@ static int uvd_v6_0_hw_init(void *handle) uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); uvd_v6_0_enable_mgcg(adev, true); - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } r = amdgpu_ring_alloc(ring, 10); if (r) { @@ -513,12 +510,9 @@ static int uvd_v6_0_hw_init(void *handle) if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } } @@ -548,7 +542,7 @@ static int uvd_v6_0_hw_fini(void *handle) if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); - ring->ready = false; + ring->sched.ready = false; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 58b39afcfb86..76a7fbef532a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -540,12 +540,9 @@ static int uvd_v7_0_hw_init(void *handle) ring = &adev->uvd.inst[j].ring; if (!amdgpu_sriov_vf(adev)) { - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - 
if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } r = amdgpu_ring_alloc(ring, 10); if (r) { @@ -582,12 +579,9 @@ static int uvd_v7_0_hw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } } done: @@ -619,7 +613,7 @@ static int uvd_v7_0_hw_fini(void *handle) for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { if (adev->uvd.harvest_config & (1 << i)) continue; - adev->uvd.inst[i].ring.ready = false; + adev->uvd.inst[i].ring.sched.ready = false; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index ea28828360d3..bed78a778e3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle) amdgpu_asic_set_vce_clocks(adev, 10000, 10000); vce_v2_0_enable_mgcg(adev, true, false); - for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].ready = false; for (i = 0; i < adev->vce.num_rings; i++) { - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); if (r) return r; - else - adev->vce.ring[i].ready = true; } DRM_INFO("VCE initialized successfully.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6dbd39730070..2b1a5a793942 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle) amdgpu_asic_set_vce_clocks(adev, 10000, 10000); - for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].ready = false; - for (i = 0; i < adev->vce.num_rings; i++) { - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); if (r) return r; - else - adev->vce.ring[i].ready = true; } DRM_INFO("VCE initialized successfully.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 1c9471890bf7..65b71fc2f7b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle) if (r) return r; - for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].ready = false; - for (i = 0; i < adev->vce.num_rings; i++) { - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); if (r) return r; - else - adev->vce.ring[i].ready = true; } DRM_INFO("VCE initialized successfully.\n"); @@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle) } for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].ready = false; + adev->vce.ring[i].sched.ready = false; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index eae90922fdbe..29628f60d50c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; int i, r; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + 
ring->sched.ready = true; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } ring = &adev->vcn.ring_jpeg; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } done: if (!r) @@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle) if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) vcn_v1_0_stop(adev); - ring->ready = false; + ring->sched.ready = false; return 0; } -- cgit From 3ba7b418f12b634b8920c979f5259013d40e38e2 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 22 Oct 2018 17:12:39 -0400 Subject: drm/amdgpu: Enable default GPU reset for dGPU on gfx8/9 v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After testing, it looks like this subset of ASICs has GPU reset working for the most part. Enable reset due to job timeout. v2: Switch from GFX version to ASIC type. v3: Fix indentation Signed-off-by: Andrey Grodzovsky Acked-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 30bc345d6fdf..416a67672f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3295,13 +3295,35 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) return false; } - if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 && - !amdgpu_sriov_vf(adev))) { - DRM_INFO("GPU recovery disabled.\n"); - return false; + if (amdgpu_gpu_recovery == 0) + goto disabled; + + if (amdgpu_sriov_vf(adev)) + return true; + + if (amdgpu_gpu_recovery == -1) { + switch (adev->asic_type) { + case CHIP_TOPAZ: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_VEGA20: + case CHIP_VEGA10: + case CHIP_VEGA12: + break; + default: + goto disabled; + } } return true; + +disabled: + DRM_INFO("GPU recovery disabled.\n"); + return false; } /** -- cgit From e4312d459a2e572e093c758793bd5357d6bddfd1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Oct 2018 17:23:09 -0500 Subject: drm/amdgpu/amdkfd: clean up mmhub and gfxhub includes Use the appropriate mmhub and gfxhub headers rather than adding them to the gmc9 header.
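Condensed, the layout after this cleanup is: each hub driver exports its own setup_vm_pt_regs() prototype from its own header, and the amdkfd glue includes those headers directly instead of picking the declarations up from gmc_v9_0.h. A short excerpt of the resulting shape (taken from the hunks below, shown here only for readability):

        /* amdgpu_amdkfd_gfx_v9.c */
        #include "mmhub_v1_0.h"
        #include "gfxhub_v1_0.h"

        /* gfxhub_v1_0.h and mmhub_v1_0.h now carry the prototypes */
        void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                          uint64_t page_table_base);
        void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                         uint64_t page_table_base);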
Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 3 ++- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 6 ------ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 2 ++ 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 40a9f348b37d..03b604c96d94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,7 +46,8 @@ #include "v9_structs.h" #include "soc15.h" #include "soc15d.h" -#include "gmc_v9_0.h" +#include "mmhub_v1_0.h" +#include "gfxhub_v1_0.h" #define V9_PIPE_PER_MEC (4) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index 206e29cad753..92d3a70cd9b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h @@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v1_0_init(struct amdgpu_device *adev); u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); +void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 1fd178a65e66..b030ca5ea107 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -27,10 +27,4 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; -/* amdgpu_amdkfd*.c */ -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index bef3d0c0c117..0de0fdf98c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable); +void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif -- cgit From dc9eeff84c77080f545575a30062af0ac65b1eb0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 29 Oct 2018 10:48:31 +0100 Subject: drm/amdgpu: further ring test cleanups MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move all error messages from IP specific code into the common helper. This way we now use the ring name in the messages instead of the index and note which device is affected as well. Also clean up error handling in the IP specific code and consequently use ETIMEDOUT when the ring test timed out.
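Sketched out, the per-IP ring tests below all converge on the same shape after this cleanup: no local error prints, a single unwind label, and -ETIMEDOUT instead of -EINVAL when the poll loop runs out, while amdgpu_ring_test_helper() does the success/failure reporting with the ring name. The sketch uses "foo" as a stand-in for any IP block (not a real function) and elides the packet emission:

        static int foo_ring_test_ring(struct amdgpu_ring *ring)
        {
                struct amdgpu_device *adev = ring->adev;
                unsigned index;
                unsigned i;
                u32 tmp;
                int r;

                r = amdgpu_device_wb_get(adev, &index);
                if (r)
                        return r;               /* no DRM_ERROR here any more */

                adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
                r = amdgpu_ring_alloc(ring, 5);
                if (r)
                        goto error_free_wb;     /* shared unwind path */

                /* ... emit a write of 0xDEADBEEF to the wb slot and commit ... */

                for (i = 0; i < adev->usec_timeout; i++) {
                        tmp = le32_to_cpu(adev->wb.wb[index]);
                        if (tmp == 0xDEADBEEF)
                                break;
                        DRM_UDELAY(1);
                }
                if (i >= adev->usec_timeout)
                        r = -ETIMEDOUT;         /* was -EINVAL before this patch */

        error_free_wb:
                amdgpu_device_wb_free(adev, index);
                return r;                       /* the helper prints the result */
        }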
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 8 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 14 +++------- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 46 ++++++++------------------------ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 24 ++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 25 +++++++---------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 24 +++++++---------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 27 +++++++------------ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 27 +++++++------------ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 23 +++++----------- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 23 +++++----------- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 23 +++++----------- drivers/gpu/drm/amd/amdgpu/si_dma.c | 23 +++++----------- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 17 ++++-------- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 16 +++-------- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 13 ++------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 30 +++++---------------- 16 files changed, 114 insertions(+), 249 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3c89c8aa33d8..5b75bdc8dc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -512,11 +512,17 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) */ int amdgpu_ring_test_helper(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; int r; r = amdgpu_ring_test_ring(ring); + if (r) + DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n", + ring->name, r); + else + DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n", + ring->name); ring->sched.ready = !r; - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 5f3f54073818..7b999ee3defb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1079,11 +1079,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) return 0; r = amdgpu_ring_alloc(ring, 16); - if (r) { - DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, VCE_CMD_END); amdgpu_ring_commit(ring); @@ -1093,14 +1091,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + if (i >= timeout) r = -ETIMEDOUT; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 27da13df2f11..4b7f52e68457 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test 
failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; } @@ -606,11 +599,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_ring_alloc(ring, 16); - if (r) { - DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, VCN_ENC_CMD_END); amdgpu_ring_commit(ring); @@ -620,14 +611,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + if (i >= adev->usec_timeout) r = -ETIMEDOUT; - } return r; } @@ -778,11 +763,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } amdgpu_ring_write(ring, PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0)); @@ -796,14 +778,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 561406a1cf88..5eb15bf9ec7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -616,21 +616,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 5); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); @@ -645,15 +641,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_device_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index c8f038136af0..0f36bb08fe1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - 
} + if (r) + goto error_free_scratch; + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) break; DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ring->idx, scratch, tmp); - r = -EINVAL; - } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_scratch: amdgpu_gfx_scratch_free(adev, scratch); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 6de6bb18bdfa..742ec4425ca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2064,17 +2064,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } + if (r) + goto error_free_scratch; + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -2086,13 +2083,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) break; DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ring->idx, scratch, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_scratch: amdgpu_gfx_scratch_free(adev, scratch); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 02f8ca56386f..45dda5684083 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -839,18 +839,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } + if (r) + goto error_free_scratch; + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -862,14 +858,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) break; DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ring->idx, scratch, tmp); - r = -EINVAL; - } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_scratch: amdgpu_gfx_scratch_free(adev, scratch); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d71c9c47444e..9248ef08bb37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -396,18 
+396,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } + if (r) + goto error_free_scratch; + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -419,14 +415,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) break; DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ring->idx, scratch, tmp); - r = -EINVAL; - } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_scratch: amdgpu_gfx_scratch_free(adev, scratch); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index fa2f6bea1d60..4fded77e4ae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -548,21 +548,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 5); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); @@ -579,15 +574,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_device_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 942fe3696ef0..5d59b7196da9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -820,21 +820,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 5); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); @@ -851,15 +846,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed 
(0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_device_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 65312897b8ba..01df6cf910b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1182,21 +1182,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 5); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); @@ -1213,15 +1208,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_device_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 05ce1ca4c789..f051e3d1e988 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -207,21 +207,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_alloc(ring, 4); - if (r) { - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); - return r; - } + if (r) + goto error_free_wb; amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); @@ -236,15 +231,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } - amdgpu_device_wb_free(adev, index); + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; +error_free_wb: + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 8cabe982a61d..51681eb0dd58 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -481,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); @@ -496,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - 
if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 56b02ee543f9..907afcf8d867 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -497,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); @@ -512,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3027607a187c..c8edd535eae5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) int r; r = amdgpu_ring_alloc(ring, 16); - if (r) { - DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } amdgpu_ring_write(ring, HEVC_ENC_CMD_END); amdgpu_ring_commit(ring); @@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + if (i >= adev->usec_timeout) r = -ETIMEDOUT; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 76a7fbef532a..87fd003ff037 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) return 0; r = amdgpu_ring_alloc(ring, 16); - if (r) { - DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", - ring->me, ring->idx, r); + if (r) return r; - } amdgpu_ring_write(ring, HEVC_ENC_CMD_END); amdgpu_ring_commit(ring); @@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", - ring->me, ring->idx, i); - } else { - DRM_ERROR("amdgpu: (%d)ring %d test failed\n", - ring->me, ring->idx); + if (i >= adev->usec_timeout) r = -ETIMEDOUT; - } return r; } @@ -1229,11 +1220,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", - ring->me, ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); @@ -1245,14 +1234,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < 
adev->usec_timeout) { - DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", - ring->me, ring->idx, i); - } else { - DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", - ring->me, ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; } -- cgit From af70a471bf067d4e808eff2b9c464e6055c98d49 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 29 Oct 2018 10:55:23 +0100 Subject: drm/amdgpu: cleanup amdgpu_ib_ring_tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Test only initialized rings, use the ring name instead of the index in the error message and note on which device the error occured. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 35 ++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index fc74f40a5912..c514bb9e94a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -347,11 +347,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; } - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + for (i = 0; i < adev->num_rings; ++i) { struct amdgpu_ring *ring = adev->rings[i]; long tmo; - if (!ring || !ring->sched.ready) + if (!ring->sched.ready) continue; /* skip IB tests for KIQ in general for the below reasons: @@ -374,20 +374,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) tmo = tmo_gfx; r = amdgpu_ring_test_ib(ring, tmo); - if (r) { - ring->sched.ready = false; - - if (ring == &adev->gfx.gfx_ring[0]) { - /* oh, oh, that's really bad */ - DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r); - adev->accel_working = false; - return r; - - } else { - /* still not good, but we can live with it */ - DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); - ret = r; - } + if (!r) { + DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n", + ring->name); + continue; + } + + ring->sched.ready = false; + DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n", + ring->name, r); + + if (ring == &adev->gfx.gfx_ring[0]) { + /* oh, oh, that's really bad */ + adev->accel_working = false; + return r; + + } else { + ret = r; } } return ret; -- cgit From 315fed0367b4b4197cdc9585d09a1e659ae6c716 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 29 Oct 2018 14:56:34 +0100 Subject: drm/amdgpu: cleanup skipping IB test on KIQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of hard coding the ring type in the function just never provide a test_ib callback. Additional to that remove the emit_ib callback to make sure the nobody ever tries to execute an IB on the KIQ. 
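A minimal sketch of the convention this establishes, assuming a hypothetical helper name (the real series open-codes the check inside amdgpu_ib_ring_tests()): a ring that cannot execute IBs simply leaves the callback NULL, and common code treats the missing callback as the capability flag.

	/* Hypothetical helper; amdgpu open-codes this check in the IB test loop. */
	static bool ring_can_test_ib(struct amdgpu_ring *ring)
	{
		return ring->sched.ready && ring->funcs->test_ib;
	}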
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 11 +++-------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 -- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 -- 3 files changed, 3 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index c514bb9e94a0..ec0e6238dbc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -351,15 +351,10 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->rings[i]; long tmo; - if (!ring->sched.ready) - continue; - - /* skip IB tests for KIQ in general for the below reasons: - * 1. We never submit IBs to the KIQ - * 2. KIQ doesn't use the EOP interrupts, - * we use some other CP interrupt. + /* KIQ rings don't have an IB test because we never submit IBs + * to them and they have no interrupt support. */ - if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + if (!ring->sched.ready || !ring->funcs->test_ib) continue; /* MM engine need more time */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 45dda5684083..740c73aa7b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6989,10 +6989,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { 17 + /* gfx_v8_0_ring_emit_vm_flush */ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ - .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_kiq, .test_ring = gfx_v8_0_ring_test_ring, - .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_rreg = gfx_v8_0_ring_emit_rreg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9248ef08bb37..67c011d7f1a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4848,10 +4848,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ - .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence_kiq, .test_ring = gfx_v9_0_ring_test_ring, - .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_rreg = gfx_v9_0_ring_emit_rreg, -- cgit From 98079389a873f45ba75bbb20dcf14db0ec694a9a Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 29 Oct 2018 16:12:42 +0100 Subject: drm/amdgpu: remove messages from IB tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already print an error message that an IB test failed in the common code. 
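With the reporting centralised, a per-engine IB test reduces to returning a bare error code. A rough sketch only (generic names, submission path elided), not any specific engine's code:

	static int foo_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	{
		struct amdgpu_device *adev = ring->adev;
		unsigned int index;
		u32 tmp;
		int r;

		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;	/* no local DRM_ERROR; the common caller logs it */

		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		/* ... submit an IB that writes 0xDEADBEEF to the slot and wait for
		 * its fence; a timed-out wait maps to -ETIMEDOUT ... */

		tmp = le32_to_cpu(adev->wb.wb[index]);
		r = (tmp == 0xDEADBEEF) ? 0 : -EINVAL;

		amdgpu_device_wb_free(adev, index);
		return r;
	}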
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 18 +++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 18 +++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 54 +++++++++------------------------ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 17 +++-------- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 20 ++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 20 ++++-------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 18 +++-------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 28 ++++++----------- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 17 +++-------- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 17 +++-------- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18 +++-------- drivers/gpu/drm/amd/amdgpu/si_dma.c | 10 ++---- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 19 ++++-------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 18 +++-------- 14 files changed, 80 insertions(+), 212 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e5a6db6beab7..69896f451e8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1243,30 +1243,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence; long r; - uint32_t ip_instance = ring->me; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r); + if (r) goto error; - } r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r); - } else { - DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx); + else if (r > 0) r = 0; - } dma_fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 7b999ee3defb..379e1ae7a8fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1113,27 +1113,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) return 0; r = amdgpu_vce_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + if (r) goto error; - } r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); - } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + else if (r > 0) r = 0; - } + error: dma_fence_put(fence); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4b7f52e68457..e2e42e3fbcf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -563,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", 
r); + if (r) goto error; - } r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); - } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + else if (r > 0) r = 0; - } dma_fence_put(fence); - error: return r; } @@ -727,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + if (r) goto error; - } r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); - } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + else if (r > 0) r = 0; - } + error: dma_fence_put(fence); return r; @@ -832,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r = 0; r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto error; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto error; - } else + } else { r = 0; + } for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); @@ -855,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) DRM_UDELAY(1); } - if (i < adev->usec_timeout) - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); - else { - DRM_ERROR("ib test failed (0x%08X)\n", tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; dma_fence_put(fence); - error: return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 5eb15bf9ec7c..49275f358f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -668,20 +668,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err0; - } ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); @@ -696,21 +692,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - 
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 0f36bb08fe1c..25cf905965fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1887,17 +1887,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err1; - } + ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; @@ -1909,22 +1907,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); + else r = -EINVAL; - } err2: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 742ec4425ca1..ff8d316d0533 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2310,17 +2310,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + if (r) return r; - } + WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err1; - } + ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; @@ -2332,22 +2330,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); + else r = -EINVAL; - } err2: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 740c73aa7b45..58c5ebe1cd73 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -879,19 +879,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 16, &ib); - if (r) { - DRM_ERROR("amdgpu: 
failed to get ib (%ld).\n", r); + if (r) goto err1; - } + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; ib.ptr[2] = lower_32_bits(gpu_addr); @@ -905,22 +902,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } tmp = adev->wb.wb[index]; - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("ib test on ring %d failed\n", ring->idx); + else r = -EINVAL; - } err2: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 67c011d7f1a6..fcb370cfa1e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -436,19 +436,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 16, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err1; - } + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; ib.ptr[2] = lower_32_bits(gpu_addr); @@ -462,22 +459,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); - r = -ETIMEDOUT; - goto err2; + r = -ETIMEDOUT; + goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); - goto err2; + goto err2; } tmp = adev->wb.wb[index]; - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); - r = 0; - } else { - DRM_ERROR("ib test on ring %d failed\n", ring->idx); - r = -EINVAL; - } + if (tmp == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; err2: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 4fded77e4ae0..c4ab54a59fc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -601,20 +601,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err0; - } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); @@ -633,21 +629,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 
0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 5d59b7196da9..e3adddbcb593 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -873,20 +873,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err0; - } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); @@ -905,21 +901,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 01df6cf910b8..2b944db86950 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1235,20 +1235,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err0; - } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); @@ -1267,21 +1263,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } + err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index f051e3d1e988..68718ddf6ed0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -258,20 +258,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + if (r) return r; - } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; 
adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + if (r) goto err0; - } ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); ib.ptr[1] = lower_32_bits(gpu_addr); @@ -284,11 +280,9 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; goto err1; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index c8edd535eae5..234bc91e91a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -327,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + if (r) goto error; - } r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); - } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + else if (r > 0) r = 0; - } + error: dma_fence_put(fence); return r; } + static int uvd_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 87fd003ff037..3985530a882f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -334,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); + if (r) goto error; - } r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); - if (r) { - DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); + if (r) goto error; - } r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); + if (r == 0) r = -ETIMEDOUT; - } else if (r < 0) { - DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); - } else { - DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); + else if (r > 0) r = 0; - } + error: dma_fence_put(fence); return r; -- cgit From 26efecf9558895a89c2920d258601b4afba10fd0 Mon Sep 17 00:00:00 2001 From: Sharat Masetty Date: Mon, 29 Oct 2018 15:02:28 +0530 Subject: drm/scheduler: Add drm_sched_job_cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a new API to clean up the scheduler job resources. This is primarliy needed in cases the job was created but was not queued to the scheduler queue. Additionally with this change, the layer which creates the scheduler job also gets to free up the job's resources and this entails moving the dma_fence_put(finished_fence) to the drivers ops free handler routines. 
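As a usage sketch (hypothetical driver names, not amdgpu's exact code), the new call covers the path where a job was initialised but never pushed to the scheduler, as well as the driver's free_job callback:

	/* Submission error path, before the job is queued (illustrative). */
	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	r = foo_prepare_job(job);			/* assumed driver helper */
	if (r) {
		drm_sched_job_cleanup(&job->base);	/* never queued: clean up here */
		return r;
	}
	drm_sched_entity_push_job(&job->base, entity);

	/* In the driver's .free_job callback, once the job has completed: */
	static void foo_free_job(struct drm_sched_job *s_job)
	{
		drm_sched_job_cleanup(s_job);	/* releases the scheduler-side fence */
		kfree(to_foo_job(s_job));	/* assumed container helper */
	}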
Signed-off-by: Sharat Masetty Reviewed-by: Christian König Acked-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 663043c8f0f5..5d768f95de24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, return 0; error_abort: - dma_fence_put(&job->base.s_fence->finished); - job->base.s_fence = NULL; + drm_sched_job_cleanup(&job->base); amdgpu_mn_unlock(p->mn); error_unlock: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 755f733bf0d9..e0af44fd6a0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); + drm_sched_job_cleanup(s_job); + amdgpu_ring_priority_put(ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); -- cgit From e0519696cc31c111f11e7c67bd663dbb88d6673f Mon Sep 17 00:00:00 2001 From: Samuel Pitoiset Date: Mon, 29 Oct 2018 12:23:06 +0100 Subject: drm/amdgpu: print an error when the parser can't be initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar to other error messages, might help for tracking down issues. Signed-off-by: Samuel Pitoiset Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5d768f95de24..fd12e9162f3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1284,7 +1284,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(&parser, data); if (r) { - DRM_ERROR("Failed to initialize parser !\n"); + DRM_ERROR("Failed to initialize parser %d!\n", r); goto out; } -- cgit From 9340c36ca5f80e0a75856cf0aeeb9e25ce7f9574 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Oct 2018 13:04:37 +0100 Subject: drm/amdgpu: use ring name instead of idx in traces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Further remove using the ring index in messages and traces. 
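The trace side relies on the kernel's dynamic-string tracepoint helpers for this; a stripped-down sketch of the pattern with a hypothetical event name (not one of the events touched below):

	TRACE_EVENT(foo_ring_event,
		TP_PROTO(struct amdgpu_ring *ring, u32 vmid),
		TP_ARGS(ring, vmid),
		TP_STRUCT__entry(
			__string(ring, ring->name)	/* variable-length string field */
			__field(u32, vmid)
		),
		TP_fast_assign(
			__assign_str(ring, ring->name);
			__entry->vmid = vmid;
		),
		TP_printk("ring=%s, vmid=%u", __get_str(ring), __entry->vmid)
	);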
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index e9bf70e2ac51..626abca770a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_ARGS(vm, ring, job), TP_STRUCT__entry( __field(u32, pasid) + __string(ring, ring->name) __field(u32, ring) __field(u32, vmid) __field(u32, vm_hub) @@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_fast_assign( __entry->pasid = vm->pasid; - __entry->ring = ring->idx; + __assign_str(ring, ring->name) __entry->vmid = job->vmid; __entry->vm_hub = ring->funcs->vmhub, __entry->pd_addr = job->vm_pd_addr; __entry->needs_flush = job->vm_needs_flush; ), - TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", - __entry->pasid, __entry->ring, __entry->vmid, + TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", + __entry->pasid, __get_str(ring), __entry->vmid, __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) ); @@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush, uint64_t pd_addr), TP_ARGS(ring, vmid, pd_addr), TP_STRUCT__entry( - __field(u32, ring) + __string(ring, ring->name) __field(u32, vmid) __field(u32, vm_hub) __field(u64, pd_addr) ), TP_fast_assign( - __entry->ring = ring->idx; + __assign_str(ring, ring->name) __entry->vmid = vmid; __entry->vm_hub = ring->funcs->vmhub; __entry->pd_addr = pd_addr; ), - TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx", - __entry->ring, __entry->vmid, + TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx", + __get_str(ring), __entry->vmid, __entry->vm_hub,__entry->pd_addr) ); -- cgit From 725b2611781a513eeeae260bfdff4026ee8e3601 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Oct 2018 13:06:22 +0100 Subject: drm/amdgpu: cleanup uvd_v6_0_ring_test_ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Accidentially missed during the last cleanup. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 234bc91e91a8..7df41d1c818b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -947,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + if (r) return r; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); @@ -962,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } - if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; } -- cgit From b7ff853f15a278583414bbaeeb47b6084a32b0fb Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Oct 2018 13:07:26 +0100 Subject: drm/amdgpu: cleanup si_dma_ring_test_ib MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Accidentially missed during the last cleanup. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si_dma.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 68718ddf6ed0..bff6954c2150 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -286,13 +286,10 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); -- cgit From 6e82c6e0661befbe2f926863586597236ca617af Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Oct 2018 13:16:28 +0100 Subject: drm/amdgpu: drop the remaining uses of ring idx in messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consistently use the ring name instead. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 ++--- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 5448cf27654e..ee47c11e92ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.irq_type = irq_type; ring->fence_drv.initialized = true; - dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " - "cpu addr 0x%p\n", ring->idx, - ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); + DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr " + "0x%016llx, cpu addr 0x%p\n", ring->name, + ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fcb370cfa1e7..100f23b5e22f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1620,8 +1620,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) /* Clear GDS reserved memory */ r = amdgpu_ring_alloc(ring, 17); if (r) { - DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", + ring->name, r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a7e61c6de71c..6cedf7ebf036 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -739,9 +739,8 @@ static int gmc_v9_0_late_init(void *handle) unsigned vmhub = ring->funcs->vmhub; ring->vm_inv_eng = vm_inv_eng[vmhub]++; - dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", - ring->idx, ring->name, ring->vm_inv_eng, - ring->funcs->vmhub); + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", + ring->name, ring->vm_inv_eng, ring->funcs->vmhub); } /* Engine 16 is used for KFD and 17 for GART flushes */ -- cgit From 965632adc8a4205d26b3e91b18041aba2fc28229 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 Oct 2018 11:24:26 -0500 Subject: drm/amdgpu: update smu firmware images for VI variants (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some new variants require updated firmware. 
V2: add MODULE_FIRMWARE for new firmwares Reviewed-by: Huang Rui (v1) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 8816c697b205..ceadeeadfa56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); } else @@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (type == CGS_UCODE_ID_SMU) { if (((adev->pdev->device == 0x67ef) && ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe2) || (adev->pdev->revision == 0xe5))) || ((adev->pdev->device == 0x67ff) && ((adev->pdev->revision == 0xcf) || @@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); - } else + } else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); } @@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xef))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); - } else + } else if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); } break; case CHIP_POLARIS12: - strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + (adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris12_k_smc.bin"); + } else { + strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + } break; case CHIP_VEGAM: strcpy(fw_name, "amdgpu/vegam_smc.bin"); -- cgit From 1cf03c54b3d89ccbaee45abbbf10c098f23801d5 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Wed, 24 Oct 2018 16:19:09 +0800 Subject: drm/amdgpu: disable page queue on SDMA for Vega12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It blocks most of sanity tests, so disable it for now. 
Tested-by: Chen Gong Signed-off-by: Junwei Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2b944db86950..ec1b2f3ec01e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1453,7 +1453,8 @@ static int sdma_v4_0_early_init(void *handle) adev->sdma.has_page_queue = false; } else { adev->sdma.num_instances = 2; - if (adev->asic_type != CHIP_VEGA20) + if (adev->asic_type != CHIP_VEGA20 && + adev->asic_type != CHIP_VEGA12) adev->sdma.has_page_queue = true; } -- cgit From 2a5ae84bf31e09526788def14e4b28c6d4d756f9 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Wed, 31 Oct 2018 10:36:34 +0800 Subject: drm/amdgpu: fix gfx wptr for sdma v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wptr value will be shifted when the function returns. Remove the redundant shift and clean up. Signed-off-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index ec1b2f3ec01e..7f9a501c919d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -372,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - u32 lowbit, highbit; - - lowbit = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR) >> 2; - highbit = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI) >> 2; - - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", - ring->me, highbit, lowbit); - wptr = highbit; + wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI); wptr = wptr << 32; - wptr |= lowbit; + wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", + ring->me, wptr); } return wptr >> 2; -- cgit From 20bedfe0c13a2e221301e3c889b2a4c48374f78a Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 16 Oct 2018 16:27:53 +0800 Subject: drm/amdgpu: Remove useless csa gpu address in vmid0 The driver didn't use this address so far.
Reviewed-by: Monk Liu Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f2f358aa0597..9ff16b790c92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -48,7 +48,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, - &adev->virt.csa_vmid0_addr, &ptr); + NULL, &ptr); if (r) return r; @@ -58,7 +58,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) void amdgpu_free_static_csa(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->virt.csa_obj, - &adev->virt.csa_vmid0_addr, + NULL, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 880ac113a3a9..f1a6a50d9444 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -238,7 +238,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ; struct amdgpu_virt { uint32_t caps; struct amdgpu_bo *csa_obj; - uint64_t csa_vmid0_addr; bool chained_ib_support; uint32_t reg_val_offs; struct amdgpu_irq_src ack_irq; -- cgit From 1e256e2762211c02078c31f839a9b243f62efd5e Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 15 Oct 2018 17:08:38 +0800 Subject: drm/amdgpu: Refine CSA related functions There are no functional changes. Use function arguments for the SRIOV-specific variables that were hardcoded in those functions, so we can share those functions on bare metal.
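With the buffer object, domain and size passed in explicitly, a non-SRIOV caller could reserve its own CSA through the same helpers; roughly (the adev->gfx.csa_obj field and the chosen domain below are illustrative only, not what the later bare-metal patches necessarily pick):

	r = amdgpu_allocate_static_csa(adev, &adev->gfx.csa_obj,
				       AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);
	if (r)
		return r;

	r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->gfx.csa_obj, &fpriv->csa_va,
				  amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK,
				  AMDGPU_CSA_SIZE);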
Reviewed-by: Monk Liu Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 32 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 8 +++++--- 4 files changed, 29 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 416a67672f3d..0bf13d69efbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* right after GMC hw init, we create CSA */ if (amdgpu_sriov_vf(adev)) { - r = amdgpu_allocate_static_csa(adev); + r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("allocate CSA failed %d\n", r); return r; @@ -1890,7 +1892,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_ucode_free_bo(adev); - amdgpu_free_static_csa(adev); + amdgpu_free_static_csa(&adev->virt.csa_obj); amdgpu_device_wb_fini(adev); amdgpu_device_vram_scratch_fini(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8f6ff9f895c8..9b3164c0f861 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } if (amdgpu_sriov_vf(adev)) { - r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); + uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; + + r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj, + &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE); if (r) goto error_vm; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 9ff16b790c92..f71bc6feea7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) return RREG32_NO_KIQ(0xc040) == 0xffffffff; } -int amdgpu_allocate_static_csa(struct amdgpu_device *adev) +int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, + u32 domain, uint32_t size) { int r; void *ptr; - r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, + r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + domain, bo, NULL, &ptr); - if (r) - return r; + if (!bo) + return -ENOMEM; - memset(ptr, 0, AMDGPU_CSA_SIZE); + memset(ptr, 0, size); return 0; } -void amdgpu_free_static_csa(struct amdgpu_device *adev) { - amdgpu_bo_free_kernel(&adev->virt.csa_obj, - NULL, - NULL); +void amdgpu_free_static_csa(struct amdgpu_bo **bo) +{ + amdgpu_bo_free_kernel(bo, NULL, NULL); } /* @@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) { * package to support SRIOV gfx preemption. 
*/ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va) + struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, + uint64_t csa_addr, uint32_t size) { - uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; struct ww_acquire_ctx ticket; struct list_head list; struct amdgpu_bo_list_entry pd; @@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&csa_tv.head); - csa_tv.bo = &adev->virt.csa_obj->tbo; + csa_tv.bo = &bo->tbo; csa_tv.shared = true; list_add(&csa_tv.head, &list); @@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); + *bo_va = amdgpu_vm_bo_add(adev, vm, bo); if (!*bo_va) { ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("failed to create bo_va for static CSA\n"); @@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, - AMDGPU_CSA_SIZE); + size); if (r) { DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); amdgpu_vm_bo_rmv(adev, *bo_va); @@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE, + r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_EXECUTABLE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f1a6a50d9444..09a7ebe964d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -280,10 +280,12 @@ struct amdgpu_vm; uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); -int amdgpu_allocate_static_csa(struct amdgpu_device *adev); +int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, + u32 domain, uint32_t size); int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va); -void amdgpu_free_static_csa(struct amdgpu_device *adev); + struct amdgpu_bo *bo, + struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size); +void amdgpu_free_static_csa(struct amdgpu_bo **bo); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); -- cgit From 7946340fa38965705f79273cef0cdc477239bf2d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 19 Oct 2018 13:35:48 +0800 Subject: drm/amdgpu: Move csa related code to separate file On bare metal we also need to reserve the CSA for preemption, so move the CSA related code out of the SRIOV code.
Reviewed-by: Monk Liu Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 117 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 39 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 92 ------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 --- 6 files changed, 158 insertions(+), 104 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 138cb787d27e..ec4a9d539322 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o amdgpu_xgmi.o + amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9348eb5a3c83..9dbdda66c318 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -75,6 +75,7 @@ #include "amdgpu_sdma.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" +#include "amdgpu_csa.h" #include "amdgpu_gart.h" #include "amdgpu_debugfs.h" #include "amdgpu_job.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c new file mode 100644 index 000000000000..0c590ddf250a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -0,0 +1,117 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ + * * Author: Monk.liu@amd.com + */ + +#include "amdgpu.h" + +uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) +{ + uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; + + addr -= AMDGPU_VA_RESERVED_SIZE; + addr = amdgpu_gmc_sign_extend(addr); + + return addr; +} + +int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, + u32 domain, uint32_t size) +{ + int r; + void *ptr; + + r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + domain, bo, + NULL, &ptr); + if (!bo) + return -ENOMEM; + + memset(ptr, 0, size); + return 0; +} + +void amdgpu_free_static_csa(struct amdgpu_bo **bo) +{ + amdgpu_bo_free_kernel(bo, NULL, NULL); +} + +/* + * amdgpu_map_static_csa should be called during amdgpu_vm_init + * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command + * submission of GFX should use this virtual address within META_DATA init + * package to support SRIOV gfx preemption. + */ +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, + uint64_t csa_addr, uint32_t size) +{ + struct ww_acquire_ctx ticket; + struct list_head list; + struct amdgpu_bo_list_entry pd; + struct ttm_validate_buffer csa_tv; + int r; + + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&csa_tv.head); + csa_tv.bo = &bo->tbo; + csa_tv.shared = true; + + list_add(&csa_tv.head, &list); + amdgpu_vm_get_pd_bo(vm, &list, &pd); + + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + if (r) { + DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); + return r; + } + + *bo_va = amdgpu_vm_bo_add(adev, vm, bo); + if (!*bo_va) { + ttm_eu_backoff_reservation(&ticket, &list); + DRM_ERROR("failed to create bo_va for static CSA\n"); + return -ENOMEM; + } + + r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, + size); + if (r) { + DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); + amdgpu_vm_bo_rmv(adev, *bo_va); + ttm_eu_backoff_reservation(&ticket, &list); + return r; + } + + r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); + + if (r) { + DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); + amdgpu_vm_bo_rmv(adev, *bo_va); + ttm_eu_backoff_reservation(&ticket, &list); + return r; + } + + ttm_eu_backoff_reservation(&ticket, &list); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h new file mode 100644 index 000000000000..ef2dfb0cd760 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h @@ -0,0 +1,39 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Monk.liu@amd.com + */ + +#ifndef AMDGPU_CSA_MANAGER_H +#define AMDGPU_CSA_MANAGER_H + +#define AMDGPU_CSA_SIZE (8 * 1024) + +uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev); +uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); +int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, + u32 domain, uint32_t size); +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, + uint64_t csa_addr, uint32_t size); +void amdgpu_free_static_csa(struct amdgpu_bo **bo); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f71bc6feea7a..cca794a1f8ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -23,16 +23,6 @@ #include "amdgpu.h" -uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) -{ - uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; - - addr -= AMDGPU_VA_RESERVED_SIZE; - addr = amdgpu_gmc_sign_extend(addr); - - return addr; -} - bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) { /* By now all MMIO pages except mailbox are blocked */ @@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) return RREG32_NO_KIQ(0xc040) == 0xffffffff; } -int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, - u32 domain, uint32_t size) -{ - int r; - void *ptr; - - r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, - domain, bo, - NULL, &ptr); - if (!bo) - return -ENOMEM; - - memset(ptr, 0, size); - return 0; -} - -void amdgpu_free_static_csa(struct amdgpu_bo **bo) -{ - amdgpu_bo_free_kernel(bo, NULL, NULL); -} - -/* - * amdgpu_map_static_csa should be called during amdgpu_vm_init - * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command - * submission of GFX should use this virtual address within META_DATA init - * package to support SRIOV gfx preemption. 
- */ -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, - uint64_t csa_addr, uint32_t size) -{ - struct ww_acquire_ctx ticket; - struct list_head list; - struct amdgpu_bo_list_entry pd; - struct ttm_validate_buffer csa_tv; - int r; - - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&csa_tv.head); - csa_tv.bo = &bo->tbo; - csa_tv.shared = true; - - list_add(&csa_tv.head, &list); - amdgpu_vm_get_pd_bo(vm, &list, &pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); - if (r) { - DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); - return r; - } - - *bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!*bo_va) { - ttm_eu_backoff_reservation(&ticket, &list); - DRM_ERROR("failed to create bo_va for static CSA\n"); - return -ENOMEM; - } - - r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, - size); - if (r) { - DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, *bo_va); - ttm_eu_backoff_reservation(&ticket, &list); - return r; - } - - r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); - - if (r) { - DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, *bo_va); - ttm_eu_backoff_reservation(&ticket, &list); - return r; - } - - ttm_eu_backoff_reservation(&ticket, &list); - return 0; -} - void amdgpu_virt_init_setting(struct amdgpu_device *adev) { /* enable virtual display */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 09a7ebe964d0..cf46dfb59320 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -250,8 +250,6 @@ struct amdgpu_virt { uint32_t gim_feature; }; -#define AMDGPU_CSA_SIZE (8 * 1024) - #define amdgpu_sriov_enabled(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) @@ -276,16 +274,7 @@ static inline bool is_virtual_machine(void) #endif } -struct amdgpu_vm; - -uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); -int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, - u32 domain, uint32_t size); -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, - struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size); -void amdgpu_free_static_csa(struct amdgpu_bo **bo); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); -- cgit From 8469868df76fc417e0256f21af96809bad96ba66 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 24 Oct 2018 20:06:37 +0800 Subject: drm/amdgpu: Change AMDGPU_CSA_SIZE to 128K In order to support new asics and MCBP feature enablement on baremetal. 
Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h index ef2dfb0cd760..524b4437a021 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h @@ -25,7 +25,7 @@ #ifndef AMDGPU_CSA_MANAGER_H #define AMDGPU_CSA_MANAGER_H -#define AMDGPU_CSA_SIZE (8 * 1024) +#define AMDGPU_CSA_SIZE (128 * 1024) uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev); uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); -- cgit From 34955e038a1b313b0f19eeacfb0e22aa6877e11d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 24 Oct 2018 13:37:37 +0800 Subject: drm/amdgpu: Modify the argument of emit_ib interface use the point of struct amdgpu_job as the function argument instand of vmid, so the other members of struct amdgpu_job can be visit in emit_ib function. v2: add a wrapper for getting the VMID add the job before the ib on the parameter list. v3: refine the wrapper name Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4 +++- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 4 +++- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 10 +++++++--- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 +++++++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 +++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 4 +++- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 3 ++- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 3 ++- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 11 +++++++++-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 10 ++++++++-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 18 +++++++++++++----- 21 files changed, 106 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ec0e6238dbc3..c48207b377bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ continue; - amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0, - need_ctx_switch); + amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch); need_ctx_switch = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 57cfe78a262b..e1b46a6703de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -33,6 +33,8 @@ #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) +#define AMDGPU_JOB_GET_VMID(job) ((job) ? 
(job)->vmid : 0) + struct amdgpu_fence; struct amdgpu_job { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4cdddbc4491b..0beb01fef83f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -129,8 +129,9 @@ struct amdgpu_ring_funcs { unsigned emit_ib_size; /* command emit functions */ void (*emit_ib)(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch); + bool ctx_switch); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); void (*emit_pipeline_sync)(struct amdgpu_ring *ring); @@ -228,7 +229,7 @@ struct amdgpu_ring { #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) +#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c))) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 379e1ae7a8fb..98a1b2ce2b9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1032,8 +1032,10 @@ out: * @ib: the IB to execute * */ -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index a1f209eed4c4..50293652af14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch); +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, bool ctx_switch); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 49275f358f7a..ad58dcec223e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (CIK). 
*/ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 extra_bits = vmid & 0xf; /* IB packet must end on a 8 DW boundary */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 25cf905965fb..5b25c26fa30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1840,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, } static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; /* insert SWITCH_BUFFER packet before first IB in the ring frame */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index ff8d316d0533..243b8c502ca6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2227,9 +2227,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, * on the gfx ring for execution by the GPU. */ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; /* insert SWITCH_BUFFER packet before first IB in the ring frame */ @@ -2256,9 +2258,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 58c5ebe1cd73..bdae5636a910 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6109,9 +6109,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) } static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; if (ib->flags & AMDGPU_IB_FLAG_CE) @@ -6139,9 +6141,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 100f23b5e22f..928034ce7994 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4049,9 +4049,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + 
bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; if (ib->flags & AMDGPU_IB_FLAG_CE) @@ -4080,20 +4082,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); - amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ - amdgpu_ring_write(ring, + amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN - (2 << 0) | + (2 << 0) | #endif - lower_32_bits(ib->gpu_addr)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - amdgpu_ring_write(ring, control); + lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, control); } static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index c4ab54a59fc9..fb2a066c0ac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). */ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e3adddbcb593..a9848d28707d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). */ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 7f9a501c919d..e740953110d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -497,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VEGA10). 
*/ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + /* IB packet must end on a 8 DW boundary */ sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index bff6954c2150..b6e473134e19 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) } static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. * Pad as necessary with NOPs. */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 51681eb0dd58..90bbcee00f28 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -509,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 907afcf8d867..1c5e12703103 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -524,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7df41d1c818b..f184842ef2a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -975,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); amdgpu_ring_write(ring, vmid); @@ -998,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 3985530a882f..8a4595968d98 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1270,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, * Write ring commands to execute the indirect buffer */ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + bool ctx_switch) { struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); @@ -1299,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 2b1a5a793942..3e84840859a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -833,8 +833,12 @@ out: } static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, VCE_CMD_IB_VM); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 65b71fc2f7b9..0054ba1b9a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -946,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle, } #endif -static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) +static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, bool ctx_switch) { + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, VCE_CMD_IB_VM); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 29628f60d50c..c1a03505f956 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1358,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 * Write ring commands to execute the indirect buffer */ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); @@ -1516,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) * Write enc ring commands to execute the indirect buffer */ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { + unsigned vmid = 
AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, VCN_ENC_CMD_IB); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); @@ -1717,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6 * Write ring commands to execute the indirect buffer. */ static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, - unsigned vmid, bool ctx_switch) + struct amdgpu_job *job, + struct amdgpu_ib *ib, + bool ctx_switch) { struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); amdgpu_ring_write(ring, PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); -- cgit From ccf191f8aabf8cb7bb01e4efae7bdb73614c745b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 1 Nov 2018 13:42:42 +0800 Subject: drm/amdgpu: Refine function name there is no functional changes.just refine function name to keep consistence with other files. change amdgpu_get_sdma_instance to amdgpu_sdma_get_instance_from_ring. suggested by alex. Reviewed-by: Alex Deucher Reviewed-by: Flora Cui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2 +- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 0fb9907494bb..c91223021ab4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -28,7 +28,7 @@ * GPU SDMA IP block helpers function. 
*/ -struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) +struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; int i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 479a2459e558..664f54930560 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -94,6 +94,6 @@ struct amdgpu_buffer_funcs { #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) struct amdgpu_sdma_instance * -amdgpu_get_sdma_instance(struct amdgpu_ring *ring); +amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ad58dcec223e..45795191de1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); int i; for (i = 0; i < count; i++) @@ -805,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, */ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); u32 pad_count; int i; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index fb2a066c0ac9..9f3cb2aec7c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); int i; for (i = 0; i < count; i++) @@ -743,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, */ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); u32 pad_count; int i; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index a9848d28707d..b6a25f92d566 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); int i; for (i = 0; i < count; i++) @@ -1014,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, */ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); u32 pad_count; int i; diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e740953110d8..e39a09eb0fa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -477,7 +477,7 @@ static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring) static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); int i; for (i = 0; i < count; i++) @@ -1376,7 +1376,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib, */ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); u32 pad_count; int i; -- cgit From f6cffc0d4d3c20bd5c9b3c825448016d7388b43a Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 31 Oct 2018 19:49:27 +0800 Subject: drm/amdgpu: Add helper function to get sdma index Get the sdma index from ring v2: refine function name Reviewed-by: Alex Deucher Reviewed-by: Flora Cui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1 + 2 files changed, 17 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index c91223021ab4..115bb0c99b0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -40,3 +40,19 @@ struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ri return NULL; } + +int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index) +{ + struct amdgpu_device *adev = ring->adev; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring || + ring == &adev->sdma.instance[i].page) { + *index = i; + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 664f54930560..16b1a6ae5ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -95,5 +95,6 @@ struct amdgpu_buffer_funcs { struct amdgpu_sdma_instance * amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); +int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); #endif -- cgit From 5c76c6a8975e1d074dc5763d3f46c928bc7d6484 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 25 Oct 2018 10:37:02 +0200 Subject: drm/amdgpu: remove nonsense in_interrupt() checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit might_sleep() is supposed to raise if warning if called in interrupt or atomic context. 
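As a minimal illustration of that point (the helper below is hypothetical and not taken from this patch): might_sleep() already performs the atomic-context check on its own, so wrapping it in an in_interrupt() test only hides the warning it is meant to produce.

	static void example_wait_for_kiq_fence(void)
	{
		might_sleep();	/* warns by itself if called from atomic context */
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
	}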
Signed-off-by: Christian König Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index cca794a1f8ba..ff887639bfa3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -70,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) if (r < 1 && (adev->in_gpu_reset || in_interrupt())) goto failed_kiq_read; - if (in_interrupt()) - might_sleep(); - + might_sleep(); while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); @@ -118,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) if (r < 1 && (adev->in_gpu_reset || in_interrupt())) goto failed_kiq_write; - if (in_interrupt()) - might_sleep(); - + might_sleep(); while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); -- cgit From af5fe1e96aa156886f89282371fce1629fcc9f6a Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 25 Oct 2018 10:49:07 +0200 Subject: drm/amdgpu: cleanup GMC v9 TLB invalidation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the kiq handling into amdgpu_virt.c and drop the fallback. Signed-off-by: Christian König Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 40 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 53 ++++---------------------------- 3 files changed, 49 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index ff887639bfa3..cfee74732edb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -132,6 +132,46 @@ failed_kiq_write: pr_err("failed to write reg:%x\n", reg); } +void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, + ref, mask); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for IRQ context */ + if (r < 1 && in_interrupt()) + goto failed_kiq; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq; + + return; + +failed_kiq: + pr_err("failed to write reg %x wait reg %x\n", reg0, reg1); +} + /** * amdgpu_virt_request_full_gpu() - request full gpu access * @amdgpu: amdgpu device. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index cf46dfb59320..0728fbc9a692 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -278,6 +278,9 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); +void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, + uint32_t reg0, uint32_t rreg1, + uint32_t ref, uint32_t mask); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6cedf7ebf036..4845b6af5808 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, return req; } -static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, - uint32_t reg0, uint32_t reg1, - uint32_t ref, uint32_t mask) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, - ref, mask); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for IRQ context */ - if (r < 1 && in_interrupt()) - goto failed_kiq; - - might_sleep(); - - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq; - - return 0; - -failed_kiq: - pr_err("failed to invalidate tlb with kiq\n"); - return r; -} - /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -375,7 +333,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, { const unsigned eng = 17; unsigned i, j; - int r; for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &adev->vmhub[i]; @@ -384,10 +341,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, if (adev->gfx.kiq.ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && !adev->in_gpu_reset) { - r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, - hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); - if (!r) - continue; + uint32_t req = hub->vm_inv_eng0_req + eng; + uint32_t ack = hub->vm_inv_eng0_ack + eng; + + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, + 1 << vmid); + continue; } spin_lock(&adev->gmc.invalidate_lock); -- cgit From 396557b03c9466f27b60052c6b364541f61092db Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 25 Oct 2018 10:50:42 +0200 Subject: drm/amdgpu: drop the busy wait for GMC v9 TLB invalidations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This code is not performance critical. 
Signed-off-by: Christian König Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 4845b6af5808..14ca4d835e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -350,35 +350,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, } spin_lock(&adev->gmc.invalidate_lock); - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); - - /* Busy wait for ACK.*/ - for (j = 0; j < 100; j++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); - tmp &= 1 << vmid; - if (tmp) - break; - cpu_relax(); - } - if (j < 100) { - spin_unlock(&adev->gmc.invalidate_lock); - continue; - } - - /* Wait for ACK with a delay.*/ for (j = 0; j < adev->usec_timeout; j++) { tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); - tmp &= 1 << vmid; - if (tmp) + if (tmp & (1 << vmid)) break; udelay(1); } - if (j < adev->usec_timeout) { - spin_unlock(&adev->gmc.invalidate_lock); - continue; - } spin_unlock(&adev->gmc.invalidate_lock); + if (j < adev->usec_timeout) + continue; + DRM_ERROR("Timeout waiting for VM flush ACK!\n"); } } -- cgit From b83761bb0b09ec11c924afe9d88e458cb16a0372 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 25 Oct 2018 11:02:50 +0200 Subject: drm/amdgpu: use GMC v9 KIQ workaround only for the GFXHUB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The MMHUB is not affected by this. Signed-off-by: Christian König Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 14ca4d835e0b..811231e4ec53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -338,9 +338,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); - if (adev->gfx.kiq.ring.sched.ready && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - !adev->in_gpu_reset) { + if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset && + adev->gfx.kiq.ring.sched.ready && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; -- cgit From f180b4bccc89332cad420a192797bc5dfb1ab5ee Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 5 Oct 2018 11:58:34 -0400 Subject: drm/amdgpu: Drop amdgpu_plane It's unnecessarily duplicating drm_plane_type. 
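For reference, the DRM core plane object already records its type, so callers can test it directly; a minimal sketch (the helper name is hypothetical):

	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		example_setup_primary_plane(plane);	/* hypothetical helper */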
Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb7..11723d8fffbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -57,7 +57,6 @@ struct amdgpu_hpd; #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base) #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base) -#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base) #define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base); @@ -325,7 +324,7 @@ struct amdgpu_mode_info { struct card_info *atom_card_info; bool mode_config_initialized; struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; - struct amdgpu_plane *planes[AMDGPU_MAX_PLANES]; + struct drm_plane *planes[AMDGPU_MAX_PLANES]; struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; /* DVI-I properties */ struct drm_property *coherent_mode_property; @@ -434,11 +433,6 @@ struct amdgpu_crtc { struct drm_pending_vblank_event *event; }; -struct amdgpu_plane { - struct drm_plane base; - enum drm_plane_type plane_type; -}; - struct amdgpu_encoder_atom_dig { bool linkb; /* atom dig */ -- cgit From 1cda5e2161b401b98e3dad561c7da0013b617f4b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Nov 2018 11:19:00 -0500 Subject: drm/amdgpu/vega20: add CLK base offset In case we need to access CLK registers. Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 2d4473557b0d..d13fc4fcb517 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); } return 0; } -- cgit From f0cfa19579fae3bd06366ebccdba26020bb6214a Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 11 Oct 2018 15:36:15 +0800 Subject: drm/amdgpu/psp: add structure for xgmi ta and its shared buffer Add data structures for xgmi trusted application. 
Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 5bc59bcb3097..26f17d9fcd4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -30,7 +30,8 @@ #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 -#define PSP_ASD_SHARED_MEM_SIZE 0x4000 +#define PSP_ASD_SHARED_MEM_SIZE 0x4000 +#define PSP_XGMI_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 #define PSP_TMR_SIZE 0x400000 @@ -88,6 +89,14 @@ struct psp_funcs struct psp_xgmi_topology_info *topology); }; +struct psp_xgmi_context { + uint8_t initialized; + uint32_t session_id; + struct amdgpu_bo *xgmi_shared_bo; + uint64_t xgmi_shared_mc_addr; + void *xgmi_shared_buf; +}; + struct psp_context { struct amdgpu_device *adev; @@ -137,6 +146,13 @@ struct psp_context /* fence value associated with cmd buffer */ atomic_t fence_value; + + /* xgmi ta firmware and buffer */ + const struct firmware *ta_fw; + uint32_t ta_xgmi_ucode_version; + uint32_t ta_xgmi_ucode_size; + uint8_t *ta_xgmi_start_addr; + struct psp_xgmi_context xgmi_context; }; struct amdgpu_psp_funcs { -- cgit From 51e7177f361ab804e788ae4924e1f5a73c76ef52 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 11 Oct 2018 21:48:00 +0800 Subject: drm/amdgpu/psp: init/de-init xgmi ta microcode Add ucode handling for psp xgmi ta firmware. Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 12 ++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 38 +++++++++++++++++++++++-------- 3 files changed, 42 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a18a8c91d52b..b00592d60132 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle) adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); adev->psp.asd_fw = NULL; + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index aa6641b944a0..7ac25a1c7853 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -57,6 +57,17 @@ struct psp_firmware_header_v1_0 { uint32_t sos_size_bytes; }; +/* version_major=1, version_minor=0 */ +struct ta_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ta_xgmi_ucode_version; + uint32_t ta_xgmi_offset_bytes; + uint32_t ta_xgmi_size_bytes; + uint32_t ta_ras_ucode_version; + uint32_t ta_ras_offset_bytes; + uint32_t ta_ras_size_bytes; +}; + /* version_major=1, version_minor=0 */ struct gfx_firmware_header_v1_0 { struct common_firmware_header header; @@ -170,6 +181,7 @@ union amdgpu_firmware_header { struct mc_firmware_header_v1_0 mc; struct smc_firmware_header_v1_0 smc; struct psp_firmware_header_v1_0 psp; + struct ta_firmware_header_v1_0 ta; struct gfx_firmware_header_v1_0 gfx; struct rlc_firmware_header_v1_0 rlc; struct rlc_firmware_header_v2_0 rlc_v2_0; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 2372f4220ecb..902b0e6a02bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -34,6 +34,7 @@ #include "nbio/nbio_7_4_offset.h" MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) const char *chip_name; char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *hdr; + const struct psp_firmware_header_v1_0 *sos_hdr; + const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); @@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) if (err) goto out; - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; - adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) - - le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); + adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) - + le32_to_cpu(sos_hdr->sos_size_bytes); + adev->psp.sys_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(hdr->sos_offset_bytes); + le32_to_cpu(sos_hdr->sos_offset_bytes); + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); + adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); + adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + return 0; out: if (err) { -- cgit From 97c8d171105d51049ef00b1ef2ca1106bf2e8c6b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 12 Oct 2018 09:43:23 +0800 Subject: drm/amdgpu/psp: add helper function to load/unload xgmi ta Add helper functions for the psp xgmi ta. 
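Condensed from the helpers added below and the later invoke/terminate patches in this series, the expected sequencing looks roughly like this (the wrapper is hypothetical and error unwinding is trimmed):

/* Illustration only: psp_xgmi_init_shared_buf() allocates the VRAM buffer
 * shared with the TA, psp_xgmi_load() issues GFX_CMD_ID_LOAD_TA and records
 * the returned session id, psp_xgmi_unload() tears that session down again.
 */
static int psp_xgmi_lifecycle_sketch(struct psp_context *psp)
{
	int ret;

	ret = psp_xgmi_init_shared_buf(psp);
	if (ret)
		return ret;

	ret = psp_xgmi_load(psp);
	if (ret)
		return ret;

	/* ... issue TA commands through the shared buffer here ... */

	return psp_xgmi_unload(psp);
}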
Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 98 +++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index b00592d60132..a639bedb0ad3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -271,6 +271,104 @@ static int psp_asd_load(struct psp_context *psp) return ret; } +static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared, + uint32_t xgmi_ta_size, uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc); + cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_xgmi_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for xgmi ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->xgmi_context.xgmi_shared_bo, + &psp->xgmi_context.xgmi_shared_mc_addr, + &psp->xgmi_context.xgmi_shared_buf); + + return ret; +} + +static int psp_xgmi_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); + + psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->xgmi_context.xgmi_shared_mc_addr, + psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + if (!ret) { + psp->xgmi_context.initialized = 1; + psp->xgmi_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} + +static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t xgmi_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id; +} + +static int psp_xgmi_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; -- cgit From 4de5f0055adf1ed45c81596afad1a41294af9e87 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Sep 2018 14:18:22 +0800 Subject: drm/amdgpu/psp: add xgmi ta header Add the psp xgmi driver interface. 
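The header added below defines a simple request/response mailbox: the driver fills cmd_id and the matching member of xgmi_in_message in the shared buffer, asks the TA to execute that command, then reads xgmi_status and xgmi_out_message back. A minimal sketch using the node-id query (psp_xgmi_invoke is introduced a few patches further down; the wrapper here is illustrative only):

/* Sketch: issue TA_COMMAND_XGMI__GET_NODE_ID through the shared memory. */
static uint64_t xgmi_query_node_id_sketch(struct psp_context *psp)
{
	struct ta_xgmi_shared_memory *xgmi_cmd =
		(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;

	memset(xgmi_cmd, 0, sizeof(*xgmi_cmd));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	if (psp_xgmi_invoke(psp, xgmi_cmd->cmd_id) ||
	    xgmi_cmd->xgmi_status != TA_XGMI_STATUS__SUCCESS)
		return 0;

	return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
}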
Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | 130 ++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h new file mode 100644 index 000000000000..ac2c27b7630c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h @@ -0,0 +1,130 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _TA_XGMI_IF_H +#define _TA_XGMI_IF_H + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +enum ta_command_xgmi { + TA_COMMAND_XGMI__INITIALIZE = 0x00, + TA_COMMAND_XGMI__GET_NODE_ID = 0x01, + TA_COMMAND_XGMI__GET_HIVE_ID = 0x02, + TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03, + TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04 +}; + +/* XGMI related enumerations */ +/**********************************************************/; +enum ta_xgmi_connected_nodes { + TA_XGMI__MAX_CONNECTED_NODES = 64 +}; + +enum ta_xgmi_status { + TA_XGMI_STATUS__SUCCESS = 0x00, + TA_XGMI_STATUS__GENERIC_FAILURE = 0x01, + TA_XGMI_STATUS__NULL_POINTER = 0x02, + TA_XGMI_STATUS__INVALID_PARAMETER = 0x03, + TA_XGMI_STATUS__NOT_INITIALIZED = 0x04, + TA_XGMI_STATUS__INVALID_NODE_NUM = 0x05, + TA_XGMI_STATUS__INVALID_NODE_ID = 0x06, + TA_XGMI_STATUS__INVALID_TOPOLOGY = 0x07, + TA_XGMI_STATUS__FAILED_ID_GEN = 0x08, + TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT = 0x09, + TA_XGMI_STATUS__SET_SHARING_ERROR = 0x0A +}; + +enum ta_xgmi_assigned_sdma_engine { + TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED = -1, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0 = 0, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1 = 1, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2 = 2, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3 = 3, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4 = 4, + TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5 = 5 +}; + +/* input/output structures for XGMI commands */ +/**********************************************************/ +struct ta_xgmi_node_info { + uint64_t node_id; + uint8_t num_hops; + uint8_t is_sharing_enabled; + enum ta_xgmi_assigned_sdma_engine sdma_engine; +}; + +struct ta_xgmi_cmd_initialize_output { + uint32_t status; +}; + +struct ta_xgmi_cmd_get_node_id_output { + uint64_t node_id; +}; + +struct 
ta_xgmi_cmd_get_hive_id_output { + uint64_t hive_id; +}; + +struct ta_xgmi_cmd_get_topology_info_input { + uint32_t num_nodes; + struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; +}; + +struct ta_xgmi_cmd_get_topology_info_output { + uint32_t num_nodes; + struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; +}; + +struct ta_xgmi_cmd_set_topology_info_input { + uint32_t num_nodes; + struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; +}; + +/**********************************************************/ +/* Common input structure for XGMI callbacks */ +union ta_xgmi_cmd_input { + struct ta_xgmi_cmd_get_topology_info_input get_topology_info; + struct ta_xgmi_cmd_set_topology_info_input set_topology_info; +}; + +/* Common output structure for XGMI callbacks */ +union ta_xgmi_cmd_output { + struct ta_xgmi_cmd_initialize_output initialize; + struct ta_xgmi_cmd_get_node_id_output get_node_id; + struct ta_xgmi_cmd_get_hive_id_output get_hive_id; + struct ta_xgmi_cmd_get_topology_info_output get_topology_info; +}; +/**********************************************************/ + +struct ta_xgmi_shared_memory { + uint32_t cmd_id; + uint32_t resp_id; + enum ta_xgmi_status xgmi_status; + uint32_t reserved; + union ta_xgmi_cmd_input xgmi_in_message; + union ta_xgmi_cmd_output xgmi_out_message; +}; + +#endif //_TA_XGMI_IF_H -- cgit From ca6e1e59a24bf4aed4d017163f8184eb4e384a9b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Sep 2018 15:01:57 +0800 Subject: drm/amdgpu/psp: add helper function to invoke xgmi ta per ta cmd_id psp_xgmi_invoke is the helper function to issue ta cmd to firmware Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 36 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3 +++ 2 files changed, 39 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a639bedb0ad3..07f9fcb59c0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -369,6 +369,42 @@ static int psp_xgmi_unload(struct psp_context *psp) return ret; } +static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t xgmi_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->xgmi_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 26f17d9fcd4c..000e12b437f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -27,6 +27,7 @@ #include "amdgpu.h" #include "psp_gfx_if.h" +#include "ta_xgmi_if.h" #define PSP_FENCE_BUFFER_SIZE 0x1000 #define 
PSP_CMD_BUFFER_SIZE 0x1000 @@ -218,6 +219,8 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; int psp_gpu_reset(struct amdgpu_device *adev); +int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); + extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; #endif -- cgit From 3e2e2ab55499f77cbd57ee91e250c085d252a979 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Sep 2018 21:28:10 +0800 Subject: drm/amdgpu/psp: initialize xgmi session (v2) Setup and tear down xgmi as part of psp. v2: - make psp_xgmi_terminate static - squash in: drm/amdgpu: only issue xgmi cmd when it is enabled drm/amdgpu/psp: terminate xgmi ta in suspend and hw_fini phase Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 69 ++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0bf13d69efbc..590588a82471 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1683,7 +1683,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) return r; - amdgpu_xgmi_add_device(adev); + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); if (amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 07f9fcb59c0a..e05dc66b1090 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -405,6 +405,53 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) return ret; } +static int psp_xgmi_terminate(struct psp_context *psp) +{ + int ret; + + if (!psp->xgmi_context.initialized) + return 0; + + ret = psp_xgmi_unload(psp); + if (ret) + return ret; + + psp->xgmi_context.initialized = 0; + + /* free xgmi shared memory */ + amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, + &psp->xgmi_context.xgmi_shared_mc_addr, + &psp->xgmi_context.xgmi_shared_buf); + + return 0; +} + +static int psp_xgmi_initialize(struct psp_context *psp) +{ + struct ta_xgmi_shared_memory *xgmi_cmd; + int ret; + + if (!psp->xgmi_context.initialized) { + ret = psp_xgmi_init_shared_buf(psp); + if (ret) + return ret; + } + + /* Load XGMI TA */ + ret = psp_xgmi_load(psp); + if (ret) + return ret; + + /* Initialize XGMI session */ + xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; + + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + + return ret; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -432,6 +479,15 @@ static int psp_hw_start(struct psp_context *psp) if (ret) return ret; + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp); + /* Warning the XGMI seesion initialize failure + * Instead of stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } return 0; } @@ -592,6 +648,10 @@ static int psp_hw_fini(void *handle) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) return 0; + if (adev->gmc.xgmi.num_physical_nodes > 1 
&& + psp->xgmi_context.initialized == 1) + psp_xgmi_terminate(psp); + psp_ring_destroy(psp, PSP_RING_TYPE__KM); amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); @@ -619,6 +679,15 @@ static int psp_suspend(void *handle) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) return 0; + if (adev->gmc.xgmi.num_physical_nodes > 1 && + psp->xgmi_context.initialized == 1) { + ret = psp_xgmi_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate xgmi ta\n"); + return ret; + } + } + ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); if (ret) { DRM_ERROR("PSP ring stop failed\n"); -- cgit From dd3c45d306220b7f3e40fd1457eaf480ab7d1b26 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Sep 2018 21:50:37 +0800 Subject: drm/amdgpu/psp: add get_node_id function get_node_id function is used for driver to get node_id for current device from xgmi ta Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Reviewed-by: Shaoyun Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 19 +++++++++++++++++++ 4 files changed, 28 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 4c5f18cf5b69..8c57924c075f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs { struct amdgpu_xgmi { /* from psp */ - u64 device_id; + u64 node_id; u64 hive_id; /* fixed per family */ u64 node_segment_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 000e12b437f0..28700a80cddd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -82,7 +82,7 @@ struct psp_funcs enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); - uint64_t (*xgmi_get_device_id)(struct psp_context *psp); + uint64_t (*xgmi_get_node_id)(struct psp_context *psp); uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); @@ -163,7 +163,7 @@ struct amdgpu_psp_funcs { struct psp_xgmi_topology_info { /* Generated by PSP to identify the GPU instance within xgmi connection */ - uint64_t device_id; + uint64_t node_id; /* * If all bits set to 0 , driver indicates it wants to retrieve the xgmi * connection vector topology, but not access enable the connections @@ -197,8 +197,8 @@ struct psp_xgmi_topology_info { ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) -#define psp_xgmi_get_device_id(psp) \ - ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0) +#define psp_xgmi_get_node_id(psp) \ + ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0) #define psp_xgmi_get_hive_id(psp) \ ((psp)->funcs->xgmi_get_hive_id ? 
(psp)->funcs->xgmi_get_hive_id((psp)) : 0) #define psp_xgmi_get_topology_info(psp, num_device, topology) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 897afbb348c1..32896ba12c67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -73,7 +73,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if ((adev->asic_type < CHIP_VEGA20) || (adev->flags & AMD_IS_APU) ) return 0; - adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp); + adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); memset(&tmp_topology[0], 0, sizeof(tmp_topology)); @@ -84,13 +84,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); list_for_each_entry(entry, &hive->device_list, head) - tmp_topology[count++].device_id = entry->device_id; + tmp_topology[count++].node_id = entry->node_id; ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); if (ret) { dev_err(adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", - adev->gmc.xgmi.device_id, + adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id, ret); goto exit; } @@ -100,7 +100,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (ret) { dev_err(tmp_adev->dev, "XGMI: Set topology failure on device %llx, hive %llx, ret %d", - tmp_adev->gmc.xgmi.device_id, + tmp_adev->gmc.xgmi.node_id, tmp_adev->gmc.xgmi.hive_id, ret); /* To do : continue with some node failed or disable the whole hive */ break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 902b0e6a02bf..6333413f51df 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -590,6 +590,24 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) return hive_id; } +static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) +{ + struct ta_xgmi_shared_memory *xgmi_cmd; + int ret; + + xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; + + /* Invoke xgmi ta to get the node id */ + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + if (ret) + return 0; + else + return xgmi_cmd->xgmi_out_message.get_node_id.node_id; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv, @@ -605,6 +623,7 @@ static const struct psp_funcs psp_v11_0_funcs = { .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, + .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) -- cgit From 4b93151f57d873b271bb25cf1fb8f230792ba75a Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Sep 2018 12:17:42 +0800 Subject: drm/amdgpu/psp: add get_hive_id function get_hive_id is used for driver to query hive_id for current device from xgmi ta Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Huang Rui Reviewed-by: Shaoyun Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 6333413f51df..7b248915489a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -581,13 +581,20 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) { - u64 hive_id = 0; + struct ta_xgmi_shared_memory *xgmi_cmd; + int ret; + + xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - /* Remove me when we can get correct hive_id through PSP */ - if (psp->adev->gmc.xgmi.num_physical_nodes) - hive_id = 0x123456789abcdef; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; - return hive_id; + /* Invoke xgmi ta to get hive id */ + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + if (ret) + return 0; + else + return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; } static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) -- cgit From 593caa07ad6ad43bcd9edb36d828000053af7e2d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Sep 2018 21:52:50 +0800 Subject: drm/amdgpu/psp: update topology info structures topology info structure needs to match with the one defined in xgmi ta Signed-off-by: Hawking Zhang Reviewed-by: Shaoyun Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 29 +++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 12 +++++------- 2 files changed, 18 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 28700a80cddd..9ec5d1a666a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -37,6 +37,7 @@ #define PSP_TMR_SIZE 0x400000 struct psp_context; +struct psp_xgmi_node_info; struct psp_xgmi_topology_info; enum psp_ring_type @@ -85,9 +86,9 @@ struct psp_funcs uint64_t (*xgmi_get_node_id)(struct psp_context *psp); uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, - struct psp_xgmi_topology_info *topology); + struct psp_xgmi_topology_info *topology); int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, - struct psp_xgmi_topology_info *topology); + struct psp_xgmi_topology_info *topology); }; struct psp_xgmi_context { @@ -161,21 +162,17 @@ struct amdgpu_psp_funcs { enum AMDGPU_UCODE_ID); }; +#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 +struct psp_xgmi_node_info { + uint64_t node_id; + uint8_t num_hops; + uint8_t is_sharing_enabled; + enum ta_xgmi_assigned_sdma_engine sdma_engine; +}; + struct psp_xgmi_topology_info { - /* Generated by PSP to identify the GPU instance within xgmi connection */ - uint64_t node_id; - /* - * If all bits set to 0 , driver indicates it wants to retrieve the xgmi - * connection vector topology, but not access enable the connections - * if some or all bits are set to 1, driver indicates it want to retrieve the - * current xgmi topology and access enable the link to GPU[i] associated - * with the bit position in the vector. - * On return,: bits indicated which xgmi links are present/active depending - * on the value passed in. The relative bit offset for the relative GPU index - * within the hive is always marked active. 
- */ - uint32_t connection_mask; - uint32_t reserved; /* must be 0 */ + uint32_t num_nodes; + struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 32896ba12c67..e92b4548db49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -63,7 +63,7 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { - struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE]; + struct psp_xgmi_topology_info tmp_topology; struct amdgpu_hive_info *hive; struct amdgpu_xgmi *entry; struct amdgpu_device *tmp_adev; @@ -76,7 +76,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); - memset(&tmp_topology[0], 0, sizeof(tmp_topology)); + memset(&tmp_topology, 0, sizeof(tmp_topology)); mutex_lock(&xgmi_mutex); hive = amdgpu_get_xgmi_hive(adev); if (!hive) @@ -84,9 +84,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); list_for_each_entry(entry, &hive->device_list, head) - tmp_topology[count++].node_id = entry->node_id; + tmp_topology.nodes[count++].node_id = entry->node_id; - ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); + ret = psp_xgmi_get_topology_info(&adev->psp, count, &tmp_topology); if (ret) { dev_err(adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", @@ -96,7 +96,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } /* Each psp need to set the latest topology */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); + ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, &tmp_topology); if (ret) { dev_err(tmp_adev->dev, "XGMI: Set topology failure on device %llx, hive %llx, ret %d", @@ -115,5 +115,3 @@ exit: mutex_unlock(&xgmi_mutex); return ret; } - - -- cgit From ec1a975e0b186dadb56249b9d415bb93c7219da3 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Sep 2018 22:30:44 +0800 Subject: drm/amdgpu/psp: add get_topology_info function get_topology_info function is used for driver to query topology_info for current device from xgmi ta Signed-off-by: Hawking Zhang Reviewed-by: Shaoyun Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 7b248915489a..f56f8e376b62 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -570,6 +570,45 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology) { + struct ta_xgmi_shared_memory *xgmi_cmd; + struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; + struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; + int i; + int ret; + + if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) + return -EINVAL; + + xgmi_cmd = (struct 
ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + + /* Fill in the shared memory with topology information as input */ + topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; + topology_info_input->num_nodes = number_devices; + + for (i = 0; i < topology_info_input->num_nodes; i++) { + topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; + topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; + topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; + topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; + } + + /* Invoke xgmi ta to get the topology information */ + ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); + if (ret) + return ret; + + /* Read the output topology information from the shared memory */ + topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; + topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; + for (i = 0; i < topology->num_nodes; i++) { + topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; + topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; + topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; + topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; + } + return 0; } -- cgit From bb8310cc22d7acd34db72f890c267b513dbc24ff Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Sep 2018 22:32:42 +0800 Subject: drm/amdgpu/psp: add set_topology_info function set_topology_info is used for driver to set current topology info to xgmi ta Signed-off-by: Hawking Zhang Reviewed-by: Shaoyun Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index f56f8e376b62..e5dd052d9e06 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -615,7 +615,29 @@ static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology) { - return 0; + struct ta_xgmi_shared_memory *xgmi_cmd; + struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; + int i; + + if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) + return -EINVAL; + + xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + + topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; + topology_info_input->num_nodes = number_devices; + + for (i = 0; i < topology_info_input->num_nodes; i++) { + topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; + topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; + topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; + topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; + } + + /* Invoke xgmi ta to set topology information */ + return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); } static u64 psp_v11_0_xgmi_get_hive_id(struct 
psp_context *psp) -- cgit From 3426d66d3e74ab0ab264a85e0795a17e3dde1e71 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Nov 2018 11:19:00 -0500 Subject: drm/amdgpu/vega20: add CLK base offset In case we need to access CLK registers. Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 2d4473557b0d..d13fc4fcb517 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); } return 0; } -- cgit From 7875a22625aa7f11befba84bd1e669201032947d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Nov 2018 14:44:29 -0500 Subject: drm/amdgpu: add DC feature mask module parameter Similar to ppfeaturemask. Allows you to selectively enable/disable DC features. Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11 +++++++++++ 2 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d0102cfc8efb..104b2e0d893b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; +extern uint amdgpu_dc_feature_mask; extern struct amdgpu_mgpu_info mgpu_info; #ifdef CONFIG_DRM_AMDGPU_SI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 943dbf3c5da1..8de55f7f1a3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; uint amdgpu_smu_memory_pool_size = 0; +/* FBC (bit 0) disabled by default*/ +uint amdgpu_dc_feature_mask = 0; + struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; @@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644); MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); #endif +/** + * DOC: dcfeaturemask (uint) + * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h. + * The default is the current set of stable display features. 
+ */ +MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))"); +module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, -- cgit From db0049129359eca348e5cc2782a90e78fda5bc85 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 7 Nov 2018 12:00:50 +0800 Subject: drm/amdgpu: fix frame size of amdgpu_xgmi_add_devices excceed 1024 bytes Instead of stack-allocated psp_xgmi_topology_info in function amdgpu_xgmi_add_device, dynamically allocated this structure to avoid the frame size of this function excceed 1024 bytes Signed-off-by: Hawking Zhang Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index e92b4548db49..56acdeab3812 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -63,7 +63,7 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { - struct psp_xgmi_topology_info tmp_topology; + struct psp_xgmi_topology_info *tmp_topology; struct amdgpu_hive_info *hive; struct amdgpu_xgmi *entry; struct amdgpu_device *tmp_adev; @@ -76,7 +76,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); - memset(&tmp_topology, 0, sizeof(tmp_topology)); + tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL); + if (!tmp_topology) + return -ENOMEM; mutex_lock(&xgmi_mutex); hive = amdgpu_get_xgmi_hive(adev); if (!hive) @@ -84,9 +86,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); list_for_each_entry(entry, &hive->device_list, head) - tmp_topology.nodes[count++].node_id = entry->node_id; + tmp_topology->nodes[count++].node_id = entry->node_id; - ret = psp_xgmi_get_topology_info(&adev->psp, count, &tmp_topology); + ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); if (ret) { dev_err(adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", @@ -96,7 +98,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } /* Each psp need to set the latest topology */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, &tmp_topology); + ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); if (ret) { dev_err(tmp_adev->dev, "XGMI: Set topology failure on device %llx, hive %llx, ret %d", @@ -113,5 +115,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) exit: mutex_unlock(&xgmi_mutex); + kfree(tmp_topology); return ret; } -- cgit From 3482d2d9b0fb4340faa23a4967f69da047fa291c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 2 Nov 2018 17:21:17 +0800 Subject: drm/amdgpu: set Vega20 LBPW as disabled at default For Vega20, LBPW feature is disabled at default. 
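The existing lbpw module parameter (amdgpu_lbpw) keeps the last word: -1, the "auto" default, is now resolved per ASIC, while an explicit amdgpu.lbpw=1 or =0 on the kernel command line should still override it. A one-line sketch of how the auto value is expected to resolve (mirrors the switch added below in gfx_v9_0_rlc_resume; the helper is illustrative only):

/* Illustration only: Raven keeps LBPW enabled; Vega20 and everything else
 * now defaults to disabled when amdgpu_lbpw is left at -1 (auto).
 */
static int gfx_v9_0_lbpw_auto_default_sketch(enum amd_asic_type asic_type)
{
	return asic_type == CHIP_RAVEN ? 1 : 0;
}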
Signed-off-by: Evan Quan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 928034ce7994..7abefb80f93d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2499,6 +2499,20 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } + if (amdgpu_lbpw == -1) { + switch (adev->asic_type) { + case CHIP_RAVEN: + amdgpu_lbpw = 1; + break; + case CHIP_VEGA20: + amdgpu_lbpw = 0; + break; + default: + amdgpu_lbpw = 0; + break; + } + } + if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_VEGA20) { if (amdgpu_lbpw != 0) -- cgit From 2cf6dd9cc5191ef9aec4f22cd64df8dc67e31c6d Mon Sep 17 00:00:00 2001 From: Trigger Huang Date: Wed, 7 Nov 2018 11:30:57 +0800 Subject: drm/amdgpu: disable page queue on Vega10 SR-IOV VF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, SDMA page queue is not used under SR-IOV VF, and this queue will cause ring test failure in amdgpu module reload case. So just disable it. Signed-off-by: Trigger Huang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e39a09eb0fa1..8977e84bebe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1451,7 +1451,10 @@ static int sdma_v4_0_early_init(void *handle) adev->sdma.has_page_queue = false; } else { adev->sdma.num_instances = 2; - if (adev->asic_type != CHIP_VEGA20 && + /* TODO: Page queue breaks driver reload under SRIOV */ + if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) + adev->sdma.has_page_queue = false; + else if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_VEGA12) adev->sdma.has_page_queue = true; } -- cgit From 79d197f31b3008c782e2ce3da8d24b5d8de48c75 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Nov 2018 14:44:29 -0500 Subject: drm/amdgpu: add DC feature mask module parameter Similar to ppfeaturemask. Allows you to selectively enable/disable DC features. 
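As the added comment notes, the mask defaults to 0 with bit 0 covering FBC; the authoritative bit layout lives in enum DC_FEATURE_MASK. A hedged sketch of how a consumer might gate a feature on the new knob (the helper is illustrative, not code from this patch):

/* Illustration only: check a feature bit against the new module parameter. */
static bool dc_feature_enabled_sketch(uint feature_bit)
{
	return (amdgpu_dc_feature_mask & feature_bit) != 0;
}

For example, booting with amdgpu.dcfeaturemask=0x1 would opt back in to the bit-0 feature (FBC, per the comment above) while leaving every other feature at its default.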
Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11 +++++++++++ 2 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9dbdda66c318..42f882c633ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -152,6 +152,7 @@ extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; +extern uint amdgpu_dc_feature_mask; extern struct amdgpu_mgpu_info mgpu_info; #ifdef CONFIG_DRM_AMDGPU_SI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 943dbf3c5da1..8de55f7f1a3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; uint amdgpu_smu_memory_pool_size = 0; +/* FBC (bit 0) disabled by default*/ +uint amdgpu_dc_feature_mask = 0; + struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; @@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644); MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); #endif +/** + * DOC: dcfeaturemask (uint) + * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h. + * The default is the current set of stable display features. + */ +MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))"); +module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, -- cgit From d2cfabbdafe09aa0b751302df9277663ff5a0270 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 7 Nov 2018 12:25:30 -0500 Subject: drm/amdgpu/sdma4: use paging queue for buffer funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the paging queue for buffer functions to avoid contention with the other queues. 
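The point of the switch is that TTM buffer moves, which service paging and eviction, now run on the dedicated SDMA page queue when one exists, so they no longer contend with work submitted to the regular SDMA gfx ring; the hunks below also disable the buffer funcs before the page ring is torn down and re-enable them only after its ring test passes. A condensed sketch of the ring selection (mirrors sdma_v4_0_set_buffer_funcs below; the helper is illustrative only):

/* Illustration only: prefer the page queue for TTM buffer functions. */
static struct amdgpu_ring *
sdma_v4_0_pick_buffer_ring_sketch(struct amdgpu_device *adev)
{
	return adev->sdma.has_page_queue ?
		&adev->sdma.instance[0].page :
		&adev->sdma.instance[0].ring;
}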
Reviewed-by: Junwei Zhang Reviewed-by: Christian König Tested-by: Chen Gong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8977e84bebe4..f4490cdd9804 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -662,6 +662,10 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) u32 rb_cntl, ib_cntl; int i; + if ((adev->mman.buffer_funcs_ring == sdma0) || + (adev->mman.buffer_funcs_ring == sdma1)) + amdgpu_ttm_set_buffer_funcs_status(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, @@ -1152,6 +1156,9 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) r = amdgpu_ring_test_helper(page); if (r) return r; + + if (adev->mman.buffer_funcs_ring == page) + amdgpu_ttm_set_buffer_funcs_status(adev, true); } if (adev->mman.buffer_funcs_ring == ring) @@ -2057,7 +2064,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = { static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) { adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; + if (adev->sdma.has_page_queue) + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; + else + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { -- cgit From a82c15668cfc02d69e0265cda50fa932310ddd30 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Mon, 15 Oct 2018 14:40:06 -0400 Subject: drm/amdgpu: Each PSP need to get latest topology info on XGMI configuration Driver need to call each psp instance to get topology info before set topology Signed-off-by: shaoyunl reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 56acdeab3812..909216a9b447 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -88,14 +88,19 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_for_each_entry(entry, &hive->device_list, head) tmp_topology->nodes[count++].node_id = entry->node_id; - ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); - if (ret) { - dev_err(adev->dev, - "XGMI: Get topology failure on device %llx, hive %llx, ret %d", - adev->gmc.xgmi.node_id, - adev->gmc.xgmi.hive_id, ret); - goto exit; + /* Each psp need to get the latest topology */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Get topology failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.node_id, + tmp_adev->gmc.xgmi.hive_id, ret); + /* To do : continue with some node failed or disable the whole hive */ + break; + } } + /* Each psp need to set the latest topology */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); -- cgit From fdb81fd788a732b5efda8638be3fe159550b032d Mon Sep 17 
00:00:00 2001 From: Likun Gao Date: Fri, 28 Sep 2018 19:21:23 +0800 Subject: drm/amdgpu: unify rlc function into structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Put function rlc_init,rlc_fini,rlc_resume,rlc_stop,rlc_start into structure amdgpu_rlc_funcs and change the method to call rlc function for each verssion of GFX. Signed-off-by: Likun Gao Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 28 +++++++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 30 ++++++++++++++++++------------ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 28 +++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++++++++++------------ 5 files changed, 78 insertions(+), 44 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index b61b5c11aead..0a7c285c0454 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -41,6 +41,12 @@ struct amdgpu_rlc_funcs { void (*enter_safe_mode)(struct amdgpu_device *adev); void (*exit_safe_mode)(struct amdgpu_device *adev); + int (*init)(struct amdgpu_device *adev); + void (*fini)(struct amdgpu_device *adev); + int (*resume)(struct amdgpu_device *adev); + void (*stop)(struct amdgpu_device *adev); + void (*reset)(struct amdgpu_device *adev); + void (*start)(struct amdgpu_device *adev); }; struct amdgpu_rlc { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 5b25c26fa30e..2082347a374f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2386,7 +2386,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - gfx_v6_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -2411,7 +2411,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -2532,8 +2532,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev) if (!adev->gfx.rlc_fw) return -EINVAL; - gfx_v6_0_rlc_stop(adev); - gfx_v6_0_rlc_reset(adev); + adev->gfx.rlc.funcs->stop(adev); + adev->gfx.rlc.funcs->reset(adev); gfx_v6_0_init_pg(adev); gfx_v6_0_init_cg(adev); @@ -2561,7 +2561,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_UCODE_ADDR, 0); gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev)); - gfx_v6_0_rlc_start(adev); + adev->gfx.rlc.funcs->start(adev); return 0; } @@ -3058,6 +3058,15 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q }; +static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { + .init = gfx_v6_0_rlc_init, + .fini = gfx_v6_0_rlc_fini, + .resume = gfx_v6_0_rlc_resume, + .stop = gfx_v6_0_rlc_stop, + .reset = gfx_v6_0_rlc_reset, + .start = gfx_v6_0_rlc_start +}; + static int gfx_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -3065,6 +3074,7 @@ static int gfx_v6_0_early_init(void *handle) adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS; adev->gfx.funcs = &gfx_v6_0_gfx_funcs; + adev->gfx.rlc.funcs = 
&gfx_v6_0_rlc_funcs; gfx_v6_0_set_ring_funcs(adev); gfx_v6_0_set_irq_funcs(adev); @@ -3097,7 +3107,7 @@ static int gfx_v6_0_sw_init(void *handle) return r; } - r = gfx_v6_0_rlc_init(adev); + r = adev->gfx.rlc.funcs->init(adev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -3148,7 +3158,7 @@ static int gfx_v6_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - gfx_v6_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return 0; } @@ -3160,7 +3170,7 @@ static int gfx_v6_0_hw_init(void *handle) gfx_v6_0_constants_init(adev); - r = gfx_v6_0_rlc_resume(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3178,7 +3188,7 @@ static int gfx_v6_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v6_0_cp_enable(adev, false); - gfx_v6_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); gfx_v6_0_fini_pg(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 243b8c502ca6..d8e2ad875cfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3298,7 +3298,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.sr_ptr); if (r) { dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); - gfx_v7_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -3321,7 +3321,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -3341,7 +3341,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -3529,13 +3529,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) adev->gfx.rlc_feature_version = le32_to_cpu( hdr->ucode_feature_version); - gfx_v7_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); /* disable CG */ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); - gfx_v7_0_rlc_reset(adev); + adev->gfx.rlc.funcs->reset(adev); gfx_v7_0_init_pg(adev); @@ -3566,7 +3566,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) if (adev->asic_type == CHIP_BONAIRE) WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); - gfx_v7_0_rlc_start(adev); + adev->gfx.rlc.funcs->start(adev); return 0; } @@ -4273,7 +4273,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode + .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode, + .init = gfx_v7_0_rlc_init, + .fini = gfx_v7_0_rlc_fini, + .resume = gfx_v7_0_rlc_resume, + .stop = gfx_v7_0_rlc_stop, + .reset = gfx_v7_0_rlc_reset, + .start = gfx_v7_0_rlc_start }; static int gfx_v7_0_early_init(void *handle) @@ -4524,7 +4530,7 @@ static int gfx_v7_0_sw_init(void *handle) return r; } - r = gfx_v7_0_rlc_init(adev); + r = adev->gfx.rlc.funcs->init(adev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -4588,7 +4594,7 @@ static int gfx_v7_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v7_0_cp_compute_fini(adev); - gfx_v7_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); 
gfx_v7_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -4611,7 +4617,7 @@ static int gfx_v7_0_hw_init(void *handle) gfx_v7_0_constants_init(adev); /* init rlc */ - r = gfx_v7_0_rlc_resume(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4629,7 +4635,7 @@ static int gfx_v7_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); gfx_v7_0_cp_enable(adev, false); - gfx_v7_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); gfx_v7_0_fini_pg(adev); return 0; @@ -4714,7 +4720,7 @@ static int gfx_v7_0_soft_reset(void *handle) gfx_v7_0_update_cg(adev, false); /* stop the rlc */ - gfx_v7_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); /* Disable GFX parsing/prefetching */ WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bdae5636a910..7dbcb2ea20fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1376,7 +1376,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v8_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -2073,7 +2073,7 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - r = gfx_v8_0_rlc_init(adev); + r = adev->gfx.rlc.funcs->init(adev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -2166,7 +2166,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); - gfx_v8_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); @@ -4160,10 +4160,10 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) { - gfx_v8_0_rlc_stop(adev); - gfx_v8_0_rlc_reset(adev); + adev->gfx.rlc.funcs->stop(adev); + adev->gfx.rlc.funcs->reset(adev); gfx_v8_0_init_pg(adev); - gfx_v8_0_rlc_start(adev); + adev->gfx.rlc.funcs->start(adev); return 0; } @@ -4845,7 +4845,7 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); - r = gfx_v8_0_rlc_resume(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4957,7 +4957,7 @@ static int gfx_v8_0_hw_fini(void *handle) else pr_err("cp is busy, skip halt cp\n"); if (!gfx_v8_0_wait_for_rlc_idle(adev)) - gfx_v8_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); else pr_err("rlc is busy, skip halt rlc\n"); adev->gfx.rlc.funcs->exit_safe_mode(adev); @@ -5049,7 +5049,7 @@ static int gfx_v8_0_pre_soft_reset(void *handle) srbm_soft_reset = adev->gfx.srbm_soft_reset; /* stop the rlc */ - gfx_v8_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) @@ -5175,7 +5175,7 @@ static int gfx_v8_0_post_soft_reset(void *handle) REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) gfx_v8_0_cp_gfx_resume(adev); - gfx_v8_0_rlc_start(adev); + adev->gfx.rlc.funcs->start(adev); return 0; } @@ -5632,7 +5632,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .enter_safe_mode = 
iceland_enter_rlc_safe_mode, - .exit_safe_mode = iceland_exit_rlc_safe_mode + .exit_safe_mode = iceland_exit_rlc_safe_mode, + .init = gfx_v8_0_rlc_init, + .fini = gfx_v8_0_rlc_fini, + .resume = gfx_v8_0_rlc_resume, + .stop = gfx_v8_0_rlc_stop, + .reset = gfx_v8_0_rlc_reset, + .start = gfx_v8_0_rlc_start }; static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7abefb80f93d..ae720851974f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1147,7 +1147,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); - gfx_v9_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } /* set up the cs buffer */ @@ -1169,7 +1169,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_err(adev->dev, "(%d) failed to create cp table bo\n", r); - gfx_v9_0_rlc_fini(adev); + adev->gfx.rlc.funcs->fini(adev); return r; } @@ -1733,7 +1733,7 @@ static int gfx_v9_0_sw_init(void *handle) return r; } - r = gfx_v9_0_rlc_init(adev); + r = adev->gfx.rlc.funcs->init(adev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -2483,12 +2483,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return 0; } - gfx_v9_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); /* disable CG */ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); - gfx_v9_0_rlc_reset(adev); + adev->gfx.rlc.funcs->reset(adev); gfx_v9_0_init_pg(adev); @@ -2521,7 +2521,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) gfx_v9_0_enable_lbpw(adev, false); } - gfx_v9_0_rlc_start(adev); + adev->gfx.rlc.funcs->start(adev); return 0; } @@ -3344,7 +3344,7 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - r = gfx_v9_0_rlc_resume(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3424,7 +3424,7 @@ static int gfx_v9_0_hw_fini(void *handle) } gfx_v9_0_cp_enable(adev, false); - gfx_v9_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); gfx_v9_0_csb_vram_unpin(adev); @@ -3499,7 +3499,7 @@ static int gfx_v9_0_soft_reset(void *handle) if (grbm_soft_reset) { /* stop the rlc */ - gfx_v9_0_rlc_stop(adev); + adev->gfx.rlc.funcs->stop(adev); /* Disable GFX parsing/prefetching */ gfx_v9_0_cp_gfx_enable(adev, false); @@ -3655,7 +3655,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - gfx_v9_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); @@ -3666,7 +3666,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } - gfx_v9_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, @@ -3882,7 +3882,13 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode + .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode, + .init = gfx_v9_0_rlc_init, + .fini = gfx_v9_0_rlc_fini, + .resume = gfx_v9_0_rlc_resume, + .stop = gfx_v9_0_rlc_stop, + .reset = gfx_v9_0_rlc_reset, + .start = 
gfx_v9_0_rlc_start }; static int gfx_v9_0_set_powergating_state(void *handle, -- cgit From 88dfc9a3dd47027c9ffc831635e5cf4e8ed3b781 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Thu, 8 Nov 2018 13:43:46 +0800 Subject: drm/amdgpu: separate amdgpu_rlc into a single file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Separate the function and struct of RLC from the file of GFX. Abstract the function of amdgpu_gfx_rlc_fini. Signed-off-by: Likun Gao Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 60 +--------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 57 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 89 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 14 ++---- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 16 ++---- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 +--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18 +------ 9 files changed, 160 insertions(+), 107 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index ec4a9d539322..f76bcb9c45e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -105,6 +105,7 @@ amdgpu-y += \ # add GFX block amdgpu-y += \ amdgpu_gfx.o \ + amdgpu_rlc.o \ gfx_v8_0.o \ gfx_v9_0.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1a656b8657f7..6a70c0b7105f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -25,6 +25,7 @@ #include #include "amdgpu.h" #include "amdgpu_gfx.h" +#include "amdgpu_rlc.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0a7c285c0454..f790e15bcd08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -29,6 +29,7 @@ */ #include "clearstate_defs.h" #include "amdgpu_ring.h" +#include "amdgpu_rlc.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L @@ -37,65 +38,6 @@ #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L - -struct amdgpu_rlc_funcs { - void (*enter_safe_mode)(struct amdgpu_device *adev); - void (*exit_safe_mode)(struct amdgpu_device *adev); - int (*init)(struct amdgpu_device *adev); - void (*fini)(struct amdgpu_device *adev); - int (*resume)(struct amdgpu_device *adev); - void (*stop)(struct amdgpu_device *adev); - void (*reset)(struct amdgpu_device *adev); - void (*start)(struct amdgpu_device *adev); -}; - -struct amdgpu_rlc { - /* for power gating */ - struct amdgpu_bo *save_restore_obj; - uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; - const u32 *reg_list; - u32 reg_list_size; - /* for clear state */ - struct amdgpu_bo *clear_state_obj; - uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; - const struct cs_section_def *cs_data; - u32 clear_state_size; - /* for cp tables */ - struct amdgpu_bo *cp_table_obj; - uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; - u32 cp_table_size; - - /* safe mode for updating CG/PG state */ - bool in_safe_mode; - 
const struct amdgpu_rlc_funcs *funcs; - - /* for firmware data */ - u32 save_and_restore_offset; - u32 clear_state_descriptor_offset; - u32 avail_scratch_ram_locations; - u32 reg_restore_list_size; - u32 reg_list_format_start; - u32 reg_list_format_separate_start; - u32 starting_offsets_start; - u32 reg_list_format_size_bytes; - u32 reg_list_size_bytes; - u32 reg_list_format_direct_reg_list_length; - u32 save_restore_list_cntl_size_bytes; - u32 save_restore_list_gpm_size_bytes; - u32 save_restore_list_srm_size_bytes; - - u32 *register_list_format; - u32 *register_restore; - u8 *save_restore_list_cntl; - u8 *save_restore_list_gpm; - u8 *save_restore_list_srm; - - bool is_rlc_v2_1; -}; - #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES struct amdgpu_mec { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c new file mode 100644 index 000000000000..c5459ab6a31f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -0,0 +1,57 @@ + +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "amdgpu_rlc.h" + +/** + * amdgpu_gfx_rlc_fini - Free BO which used for RLC + * + * @adev: amdgpu_device pointer + * + * Free three BO which is used for rlc_save_restore_block, rlc_clear_state_block + * and rlc_jump_table_block. + */ +void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev) +{ + /* save restore block */ + if (adev->gfx.rlc.save_restore_obj) { + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); + } + + /* clear state block */ + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + + /* jump table block */ + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h new file mode 100644 index 000000000000..b3b092022fc4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -0,0 +1,89 @@ + +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_RLC_H__ +#define __AMDGPU_RLC_H__ + +#include "clearstate_defs.h" + +struct amdgpu_rlc_funcs { + void (*enter_safe_mode)(struct amdgpu_device *adev); + void (*exit_safe_mode)(struct amdgpu_device *adev); + int (*init)(struct amdgpu_device *adev); + int (*resume)(struct amdgpu_device *adev); + void (*stop)(struct amdgpu_device *adev); + void (*reset)(struct amdgpu_device *adev); + void (*start)(struct amdgpu_device *adev); +}; + +struct amdgpu_rlc { + /* for power gating */ + struct amdgpu_bo *save_restore_obj; + uint64_t save_restore_gpu_addr; + volatile uint32_t *sr_ptr; + const u32 *reg_list; + u32 reg_list_size; + /* for clear state */ + struct amdgpu_bo *clear_state_obj; + uint64_t clear_state_gpu_addr; + volatile uint32_t *cs_ptr; + const struct cs_section_def *cs_data; + u32 clear_state_size; + /* for cp tables */ + struct amdgpu_bo *cp_table_obj; + uint64_t cp_table_gpu_addr; + volatile uint32_t *cp_table_ptr; + u32 cp_table_size; + + /* safe mode for updating CG/PG state */ + bool in_safe_mode; + const struct amdgpu_rlc_funcs *funcs; + + /* for firmware data */ + u32 save_and_restore_offset; + u32 clear_state_descriptor_offset; + u32 avail_scratch_ram_locations; + u32 reg_restore_list_size; + u32 reg_list_format_start; + u32 reg_list_format_separate_start; + u32 starting_offsets_start; + u32 reg_list_format_size_bytes; + u32 reg_list_size_bytes; + u32 reg_list_format_direct_reg_list_length; + u32 save_restore_list_cntl_size_bytes; + u32 save_restore_list_gpm_size_bytes; + u32 save_restore_list_srm_size_bytes; + + u32 *register_list_format; + u32 *register_restore; + u8 *save_restore_list_cntl; + u8 *save_restore_list_gpm; + u8 *save_restore_list_srm; + + bool is_rlc_v2_1; +}; + +void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 2082347a374f..192d98490188 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2351,13 +2351,6 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); - amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); - amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); -} - static int gfx_v6_0_rlc_init(struct 
amdgpu_device *adev) { const u32 *src_ptr; @@ -2386,7 +2379,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -2411,7 +2404,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -3060,7 +3053,6 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { .init = gfx_v6_0_rlc_init, - .fini = gfx_v6_0_rlc_fini, .resume = gfx_v6_0_rlc_resume, .stop = gfx_v6_0_rlc_stop, .reset = gfx_v6_0_rlc_reset, @@ -3158,7 +3150,7 @@ static int gfx_v6_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d8e2ad875cfe..8097534aa6c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3252,13 +3252,6 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, * The RLC is a multi-purpose microengine that handles a * variety of functions. */ -static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); - amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); - amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); -} - static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) { const u32 *src_ptr; @@ -3298,7 +3291,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.sr_ptr); if (r) { dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -3321,7 +3314,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -3341,7 +3334,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -4275,7 +4268,6 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode, .init = gfx_v7_0_rlc_init, - .fini = gfx_v7_0_rlc_fini, .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, @@ -4594,7 +4586,7 @@ static int gfx_v7_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v7_0_cp_compute_fini(adev); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); gfx_v7_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 7dbcb2ea20fd..81a308bac230 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1348,12 +1348,6 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev) } } -static void 
gfx_v8_0_rlc_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); - amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); -} - static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) { volatile u32 *dst_ptr; @@ -1376,7 +1370,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cs_ptr); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -2166,7 +2160,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); @@ -5634,7 +5628,6 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .enter_safe_mode = iceland_enter_rlc_safe_mode, .exit_safe_mode = iceland_exit_rlc_safe_mode, .init = gfx_v8_0_rlc_init, - .fini = gfx_v8_0_rlc_fini, .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ae720851974f..84831839070c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1112,19 +1112,6 @@ static void rv_init_cp_jump_table(struct amdgpu_device *adev) } } -static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev) -{ - /* clear state block */ - amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - - /* jump table block */ - amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); -} - static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) { volatile u32 *dst_ptr; @@ -1147,7 +1134,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } /* set up the cs buffer */ @@ -1169,7 +1156,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (r) { dev_err(adev->dev, "(%d) failed to create cp table bo\n", r); - adev->gfx.rlc.funcs->fini(adev); + amdgpu_gfx_rlc_fini(adev); return r; } @@ -3884,7 +3871,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode, .init = gfx_v9_0_rlc_init, - .fini = gfx_v9_0_rlc_fini, .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, -- cgit From 106c7d6148e5aadd394e6701f7e498df49b869d1 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Thu, 8 Nov 2018 20:19:54 +0800 Subject: drm/amdgpu: abstract the function of enter/exit safe mode for RLC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Abstract the function of amdgpu_gfx_rlc_enter/exit_safe_mode and some part of rlc_init to improve the reusability of RLC. 
Signed-off-by: Likun Gao Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 229 +++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 33 +++-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 +--- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 148 ++++----------------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 201 ++++++++-------------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 183 ++++++------------------- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 +- 8 files changed, 384 insertions(+), 446 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c5459ab6a31f..c8793e6cc3c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -1,4 +1,3 @@ - /* * Copyright 2014 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. @@ -23,11 +22,237 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ - +#include #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" +/** + * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode + * + * @adev: amdgpu_device pointer + * + * Set RLC enter into safe mode if RLC is enabled and haven't in safe mode. + */ +void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) +{ + if (adev->gfx.rlc.in_safe_mode) + return; + + /* if RLC is not enabled, do nothing */ + if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) + return; + + if (adev->cg_flags & + (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_3D_CGCG)) { + adev->gfx.rlc.funcs->set_safe_mode(adev); + adev->gfx.rlc.in_safe_mode = true; + } +} + +/** + * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode + * + * @adev: amdgpu_device pointer + * + * Set RLC exit safe mode if RLC is enabled and have entered into safe mode. + */ +void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) +{ + if (!(adev->gfx.rlc.in_safe_mode)) + return; + + /* if RLC is not enabled, do nothing */ + if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) + return; + + if (adev->cg_flags & + (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_3D_CGCG)) { + adev->gfx.rlc.funcs->unset_safe_mode(adev); + adev->gfx.rlc.in_safe_mode = false; + } +} + +/** + * amdgpu_gfx_rlc_init_sr - Init save restore block + * + * @adev: amdgpu_device pointer + * @dws: the size of save restore block + * + * Allocate and setup value to save restore block of rlc. + * Returns 0 on succeess or negative error code if allocate failed. 
+ */ +int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) +{ + const u32 *src_ptr; + volatile u32 *dst_ptr; + u32 i; + int r; + + /* allocate save restore block */ + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); + if (r) { + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); + amdgpu_gfx_rlc_fini(adev); + return r; + } + + /* write the sr buffer */ + src_ptr = adev->gfx.rlc.reg_list; + dst_ptr = adev->gfx.rlc.sr_ptr; + for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) + dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + + return 0; +} + +/** + * amdgpu_gfx_rlc_init_csb - Init clear state block + * + * @adev: amdgpu_device pointer + * + * Allocate and setup value to clear state block of rlc. + * Returns 0 on succeess or negative error code if allocate failed. + */ +int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) +{ + volatile u32 *dst_ptr; + u32 dws; + int r; + + /* allocate clear state block */ + adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); + amdgpu_gfx_rlc_fini(adev); + return r; + } + + /* set up the cs buffer */ + dst_ptr = adev->gfx.rlc.cs_ptr; + adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + return 0; +} + +/** + * amdgpu_gfx_rlc_init_cpt - Init cp table + * + * @adev: amdgpu_device pointer + * + * Allocate and setup value to cp table of rlc. + * Returns 0 on succeess or negative error code if allocate failed. + */ +int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create cp table bo\n", r); + amdgpu_gfx_rlc_fini(adev); + return r; + } + + /* set up the cp table */ + amdgpu_gfx_rlc_setup_cp_table(adev); + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + return 0; +} + +/** + * amdgpu_gfx_rlc_setup_cp_table - setup cp the buffer of cp table + * + * @adev: amdgpu_device pointer + * + * Write cp firmware data into cp table. 
+ */ +void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me; + u32 bo_offset = 0; + u32 table_offset, table_size; + + max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev); + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 4) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + /** * amdgpu_gfx_rlc_fini - Free BO which used for RLC * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index b3b092022fc4..49a8ab52113b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -1,4 +1,3 @@ - /* * Copyright 2014 Advanced Micro Devices, Inc. 
* @@ -28,9 +27,13 @@ #include "clearstate_defs.h" struct amdgpu_rlc_funcs { - void (*enter_safe_mode)(struct amdgpu_device *adev); - void (*exit_safe_mode)(struct amdgpu_device *adev); + bool (*is_rlc_enabled)(struct amdgpu_device *adev); + void (*set_safe_mode)(struct amdgpu_device *adev); + void (*unset_safe_mode)(struct amdgpu_device *adev); int (*init)(struct amdgpu_device *adev); + u32 (*get_csb_size)(struct amdgpu_device *adev); + void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); + int (*get_cp_table_num)(struct amdgpu_device *adev); int (*resume)(struct amdgpu_device *adev); void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); @@ -39,21 +42,21 @@ struct amdgpu_rlc_funcs { struct amdgpu_rlc { /* for power gating */ - struct amdgpu_bo *save_restore_obj; - uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; + struct amdgpu_bo *save_restore_obj; + uint64_t save_restore_gpu_addr; + volatile uint32_t *sr_ptr; const u32 *reg_list; u32 reg_list_size; /* for clear state */ - struct amdgpu_bo *clear_state_obj; - uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; + struct amdgpu_bo *clear_state_obj; + uint64_t clear_state_gpu_addr; + volatile uint32_t *cs_ptr; const struct cs_section_def *cs_data; u32 clear_state_size; /* for cp tables */ - struct amdgpu_bo *cp_table_obj; - uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; + struct amdgpu_bo *cp_table_obj; + uint64_t cp_table_gpu_addr; + volatile uint32_t *cp_table_ptr; u32 cp_table_size; /* safe mode for updating CG/PG state */ @@ -84,6 +87,12 @@ struct amdgpu_rlc { bool is_rlc_v2_1; }; +void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev); +void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev); +int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws); +int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev); +int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev); +void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev); void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 79220a91abe3..86e14c754dd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) if (pi->caps_sq_ramping || pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable) { ret = ci_program_pt_config_registers(adev, didt_config_ci); if (ret) { - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); return ret; } } ci_do_enable_didt(adev, enable); - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 192d98490188..1dc3013ea1d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2355,7 +2355,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) { const u32 *src_ptr; volatile u32 *dst_ptr; - u32 dws, i; + u32 dws; u64 reg_list_mc_addr; const struct cs_section_def *cs_data; int r; @@ -2370,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) cs_data = adev->gfx.rlc.cs_data; if (src_ptr) { - /* save restore block */ - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - 
AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_obj, - &adev->gfx.rlc.save_restore_gpu_addr, - (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", - r); - amdgpu_gfx_rlc_fini(adev); + /* init save restore block */ + r = amdgpu_gfx_rlc_init_sr(adev, dws); + if (r) return r; - } - - /* write the sr buffer */ - dst_ptr = adev->gfx.rlc.sr_ptr; - for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) - dst_ptr[i] = cpu_to_le32(src_ptr[i]); - - amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); } if (cs_data) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8097534aa6c9..f467b9bd090d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] = static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); -static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v7_0_init_pg(struct amdgpu_device *adev); static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); @@ -3255,8 +3254,7 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) { const u32 *src_ptr; - volatile u32 *dst_ptr; - u32 dws, i; + u32 dws; const struct cs_section_def *cs_data; int r; @@ -3283,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) cs_data = adev->gfx.rlc.cs_data; if (src_ptr) { - /* save restore block */ - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_obj, - &adev->gfx.rlc.save_restore_gpu_addr, - (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); - amdgpu_gfx_rlc_fini(adev); + /* init save restore block */ + r = amdgpu_gfx_rlc_init_sr(adev, dws); + if (r) return r; - } - - /* write the sr buffer */ - dst_ptr = adev->gfx.rlc.sr_ptr; - for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) - dst_ptr[i] = cpu_to_le32(src_ptr[i]); - amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); } if (cs_data) { - /* clear state block */ - adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); - - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - amdgpu_gfx_rlc_fini(adev); + /* init clear state block */ + r = amdgpu_gfx_rlc_init_csb(adev); + if (r) return r; - } - - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - gfx_v7_0_get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } if (adev->gfx.rlc.cp_table_size) { - - r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - amdgpu_gfx_rlc_fini(adev); + r = amdgpu_gfx_rlc_init_cpt(adev); + if (r) return r; - } - - gfx_v7_0_init_cp_pg_table(adev); - - amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); - 
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - } return 0; @@ -3423,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) return orig; } -static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) +{ + return true; +} + +static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev) { u32 tmp, i, mask; @@ -3445,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev) { u32 tmp; @@ -3761,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) WREG32(mmRLC_PG_CNTL, data); } -static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev) +static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev) { - const __le32 *fw_data; - volatile u32 *dst_ptr; - int me, i, max_me = 4; - u32 bo_offset = 0; - u32 table_offset, table_size; - if (adev->asic_type == CHIP_KAVERI) - max_me = 5; - - if (adev->gfx.rlc.cp_table_ptr == NULL) - return; - - /* write the cp table buffer */ - dst_ptr = adev->gfx.rlc.cp_table_ptr; - for (me = 0; me < max_me; me++) { - if (me == 0) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; - fw_data = (const __le32 *) - (adev->gfx.ce_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 1) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; - fw_data = (const __le32 *) - (adev->gfx.pfp_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 2) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; - fw_data = (const __le32 *) - (adev->gfx.me_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 3) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec2_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } - - for (i = 0; i < table_size; i ++) { - dst_ptr[bo_offset + i] = - cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); - } - - bo_offset += table_size; - } + return 5; + else + return 4; } static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, @@ -4265,9 +4165,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { }; static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { - .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode, + .is_rlc_enabled = gfx_v7_0_is_rlc_enabled, + .set_safe_mode = gfx_v7_0_set_safe_mode, + .unset_safe_mode = 
gfx_v7_0_unset_safe_mode, .init = gfx_v7_0_rlc_init, + .get_csb_size = gfx_v7_0_get_csb_size, + .get_csb_buffer = gfx_v7_0_get_csb_buffer, + .get_cp_table_num = gfx_v7_0_cp_pg_table_num, .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 81a308bac230..cb066a8dccd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1283,75 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, buffer[count++] = cpu_to_le32(0); } -static void cz_init_cp_jump_table(struct amdgpu_device *adev) +static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev) { - const __le32 *fw_data; - volatile u32 *dst_ptr; - int me, i, max_me = 4; - u32 bo_offset = 0; - u32 table_offset, table_size; - if (adev->asic_type == CHIP_CARRIZO) - max_me = 5; - - /* write the cp table buffer */ - dst_ptr = adev->gfx.rlc.cp_table_ptr; - for (me = 0; me < max_me; me++) { - if (me == 0) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; - fw_data = (const __le32 *) - (adev->gfx.ce_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 1) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; - fw_data = (const __le32 *) - (adev->gfx.pfp_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 2) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; - fw_data = (const __le32 *) - (adev->gfx.me_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 3) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 4) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec2_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } - - for (i = 0; i < table_size; i ++) { - dst_ptr[bo_offset + i] = - cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); - } - - bo_offset += table_size; - } + return 5; + else + return 4; } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; - u32 dws; const struct cs_section_def *cs_data; int r; @@ -1360,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) cs_data = adev->gfx.rlc.cs_data; if (cs_data) { - /* clear state block */ - adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); - - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - 
amdgpu_gfx_rlc_fini(adev); + /* init clear state block */ + r = amdgpu_gfx_rlc_init_csb(adev); + if (r) return r; - } - - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - gfx_v8_0_get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } if ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY)) { adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + r = amdgpu_gfx_rlc_init_cpt(adev); + if (r) return r; - } - - cz_init_cp_jump_table(adev); - - amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); } return 0; @@ -4945,7 +4860,7 @@ static int gfx_v8_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if (!gfx_v8_0_wait_for_idle(adev)) gfx_v8_0_cp_enable(adev, false); else @@ -4954,7 +4869,7 @@ static int gfx_v8_0_hw_fini(void *handle) adev->gfx.rlc.funcs->stop(adev); else pr_err("rlc is busy, skip halt rlc\n"); - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); return 0; } @@ -5417,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GFX_DMG)) - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5471,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GFX_DMG)) - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); return 0; } @@ -5565,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e -static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) +static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev) { - u32 data; - unsigned i; + uint32_t rlc_setting; - data = RREG32(mmRLC_CNTL); - if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) - return; + rlc_setting = RREG32(mmRLC_CNTL); + if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) + return false; - if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { - data |= RLC_SAFE_MODE__CMD_MASK; - data &= ~RLC_SAFE_MODE__MESSAGE_MASK; - data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - WREG32(mmRLC_SAFE_MODE, data); + return true; +} - for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPM_STAT) & - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) - break; - udelay(1); - } +static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev) +{ + uint32_t data; + unsigned i; + data = RREG32(mmRLC_CNTL); + data |= RLC_SAFE_MODE__CMD_MASK; + data &= ~RLC_SAFE_MODE__MESSAGE_MASK; + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); + WREG32(mmRLC_SAFE_MODE, data); - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) - break; - 
udelay(1); - } - adev->gfx.rlc.in_safe_mode = true; + /* wait for RLC_SAFE_MODE */ + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPM_STAT) & + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == + (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | + RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) + break; + udelay(1); + } + for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + break; + udelay(1); } } -static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) { - u32 data = 0; + uint32_t data; unsigned i; data = RREG32(mmRLC_CNTL); - if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) - return; - - if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { - if (adev->gfx.rlc.in_safe_mode) { - data |= RLC_SAFE_MODE__CMD_MASK; - data &= ~RLC_SAFE_MODE__MESSAGE_MASK; - WREG32(mmRLC_SAFE_MODE, data); - adev->gfx.rlc.in_safe_mode = false; - } - } + data |= RLC_SAFE_MODE__CMD_MASK; + data &= ~RLC_SAFE_MODE__MESSAGE_MASK; + WREG32(mmRLC_SAFE_MODE, data); for (i = 0; i < adev->usec_timeout; i++) { if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) @@ -5625,9 +5536,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { - .enter_safe_mode = iceland_enter_rlc_safe_mode, - .exit_safe_mode = iceland_exit_rlc_safe_mode, + .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, + .set_safe_mode = gfx_v8_0_set_safe_mode, + .unset_safe_mode = gfx_v8_0_unset_safe_mode, .init = gfx_v8_0_rlc_init, + .get_csb_size = gfx_v8_0_get_csb_size, + .get_csb_buffer = gfx_v8_0_get_csb_buffer, + .get_cp_table_num = gfx_v8_0_cp_jump_table_num, .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, @@ -5639,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev { uint32_t temp, data; - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { @@ -5735,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev gfx_v8_0_wait_for_rlc_serdes(adev); } - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, @@ -5745,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); @@ -5828,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev gfx_v8_0_wait_for_rlc_serdes(adev); - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 84831839070c..d6783ba2c9d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1050,72 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 
1 : 0); } -static void rv_init_cp_jump_table(struct amdgpu_device *adev) +static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) { - const __le32 *fw_data; - volatile u32 *dst_ptr; - int me, i, max_me = 5; - u32 bo_offset = 0; - u32 table_offset, table_size; - - /* write the cp table buffer */ - dst_ptr = adev->gfx.rlc.cp_table_ptr; - for (me = 0; me < max_me; me++) { - if (me == 0) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; - fw_data = (const __le32 *) - (adev->gfx.ce_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 1) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; - fw_data = (const __le32 *) - (adev->gfx.pfp_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 2) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; - fw_data = (const __le32 *) - (adev->gfx.me_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 3) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } else if (me == 4) { - const struct gfx_firmware_header_v1_0 *hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; - fw_data = (const __le32 *) - (adev->gfx.mec2_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - table_offset = le32_to_cpu(hdr->jt_offset); - table_size = le32_to_cpu(hdr->jt_size); - } - - for (i = 0; i < table_size; i ++) { - dst_ptr[bo_offset + i] = - cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); - } - - bo_offset += table_size; - } + return 5; } static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; - u32 dws; const struct cs_section_def *cs_data; int r; @@ -1124,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) cs_data = adev->gfx.rlc.cs_data; if (cs_data) { - /* clear state block */ - adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", - r); - amdgpu_gfx_rlc_fini(adev); + /* init clear state block */ + r = amdgpu_gfx_rlc_init_csb(adev); + if (r) return r; - } - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - gfx_v9_0_get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - 
&adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create cp table bo\n", r); - amdgpu_gfx_rlc_fini(adev); + r = amdgpu_gfx_rlc_init_cpt(adev); + if (r) return r; - } - - rv_init_cp_jump_table(adev); - amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); } switch (adev->asic_type) { @@ -3585,64 +3499,47 @@ static int gfx_v9_0_late_init(void *handle) return 0; } -static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev) { - uint32_t rlc_setting, data; - unsigned i; - - if (adev->gfx.rlc.in_safe_mode) - return; + uint32_t rlc_setting; /* if RLC is not enabled, do nothing */ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) - return; - - if (adev->cg_flags & - (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | - AMD_CG_SUPPORT_GFX_3D_CGCG)) { - data = RLC_SAFE_MODE__CMD_MASK; - data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); + return false; - /* wait for RLC_SAFE_MODE */ - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) - break; - udelay(1); - } - adev->gfx.rlc.in_safe_mode = true; - } + return true; } -static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) { - uint32_t rlc_setting, data; - - if (!adev->gfx.rlc.in_safe_mode) - return; + uint32_t data; + unsigned i; - /* if RLC is not enabled, do nothing */ - rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); - if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) - return; + data = RLC_SAFE_MODE__CMD_MASK; + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); - if (adev->cg_flags & - (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { - /* - * Try to exit safe mode only if it is already in safe - * mode. 
- */ - data = RLC_SAFE_MODE__CMD_MASK; - WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); - adev->gfx.rlc.in_safe_mode = false; + /* wait for RLC_SAFE_MODE */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + break; + udelay(1); } } +static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RLC_SAFE_MODE__CMD_MASK; + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); +} + static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); @@ -3653,7 +3550,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, @@ -3751,7 +3648,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, { uint32_t data, def; - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); /* Enable 3D CGCG/CGLS */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { @@ -3791,7 +3688,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); } - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, @@ -3799,7 +3696,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev { uint32_t def, data; - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); @@ -3839,7 +3736,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); } - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, @@ -3868,9 +3765,13 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, } static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { - .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode, + .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, + .set_safe_mode = gfx_v9_0_set_safe_mode, + .unset_safe_mode = gfx_v9_0_unset_safe_mode, .init = gfx_v9_0_rlc_init, + .get_csb_size = gfx_v9_0_get_csb_size, + .get_csb_buffer = gfx_v9_0_get_csb_buffer, + .get_cp_table_num = gfx_v9_0_cp_jump_table_num, .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index d0e478f43443..0c9a2c03504e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - adev->gfx.rlc.funcs->enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - 
adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); return ret; } } kv_do_enable_didt(adev, enable); - adev->gfx.rlc.funcs->exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev); } return 0; -- cgit From 688be01a0ab7eeefb1cccf3ad1c59803276b5a12 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 8 Nov 2018 16:34:48 -0500 Subject: drm/amdgpu/gfx9: rework lbpw enable code To avoid changing the global lbpw module parameter directly. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d6783ba2c9d0..c27caa144c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2400,26 +2400,21 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - if (amdgpu_lbpw == -1) { - switch (adev->asic_type) { - case CHIP_RAVEN: - amdgpu_lbpw = 1; - break; - case CHIP_VEGA20: - amdgpu_lbpw = 0; - break; - default: - amdgpu_lbpw = 0; - break; - } - } - - if (adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_VEGA20) { - if (amdgpu_lbpw != 0) + switch (adev->asic_type) { + case CHIP_RAVEN: + if (amdgpu_lbpw == 0) + gfx_v9_0_enable_lbpw(adev, false); + else + gfx_v9_0_enable_lbpw(adev, true); + break; + case CHIP_VEGA20: + if (amdgpu_lbpw > 0) gfx_v9_0_enable_lbpw(adev, true); else gfx_v9_0_enable_lbpw(adev, false); + break; + default: + break; } adev->gfx.rlc.funcs->start(adev); -- cgit From 5581c670fb7ec267fc79215c6d5176b07e5f6dad Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Mon, 12 Nov 2018 11:19:24 -0500 Subject: drm/amdgpu: set system aperture to cover whole FB region MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In XGMI configuration, the FB region covers vram region from peer device, adjust system aperture to cover all of them Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: shaoyunl Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ceb7847b504f..bfa317ad20a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) /* Program the system aperture low logical page number. */ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, - min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) /* @@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) * to get rid of the VM fault and hardware hang. */ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max((adev->gmc.vram_end >> 18) + 0x1, + max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18)); else WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. 
*/ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index fd23ba1226a5..a0db67adc34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
 	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
 		/*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 		 * to get rid of the VM fault and hardware hang.
 		 */
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max((adev->gmc.vram_end >> 18) + 0x1,
+			     max((adev->gmc.fb_end >> 18) + 0x1,
 				 adev->gmc.agp_end >> 18));
 	else
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
-- cgit

From c837243ff4017f493c7d6f4ab57278d812a86859 Mon Sep 17 00:00:00 2001
From: Philip Yang
Date: Mon, 12 Nov 2018 14:00:45 -0500
Subject: drm/amdgpu: fix bug with IH ring setup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The bug limits the IH ring wptr address to 40 bits. When system memory
is larger than 1 TB, the bus address needs more than 40 bits, so the
interrupt cannot be handled and cleared correctly.

Reviewed-by: Christian König
Signed-off-by: Philip Yang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a99f71797aa3..a0fda6f9252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	else
 		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
 
 	/* set rptr, wptr to 0 */
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-- cgit

From c1a17777eb45d9f3821f35e9869c0a08cd2e664e Mon Sep 17 00:00:00 2001
From: Christian König
Date: Mon, 12 Nov 2018 18:08:31 +0100
Subject: drm/amdgpu: fix huge page handling on Vega10
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We accidentally set the huge flag on the parent instead of the children.
This caused some VM faults under memory pressure.
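Illustrative sketch (not part of the original patch): condensed from the hunk below, the corrected walk only sets the flag after stepping down one level, so it lands on the child entries that the fragment actually covers; helper names are taken from the patch itself.

	if (amdgpu_vm_pt_descendant(adev, &cursor)) {
		/* descend one level and mark every child the fragment covers */
		while (cursor.pfn < frag_start) {
			cursor.entry->huge = true;
			amdgpu_vm_pt_next(adev, &cursor);
		}
	} else if (frag >= shift) {
		/* nothing to descend into, just advance on the same level */
		amdgpu_vm_pt_next(adev, &cursor);
	}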
Signed-off-by: Christian König
Acked-by: Alex Deucher
Tested-by: Samuel Pitoiset
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 352b30409060..dad0e2342df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			continue;
 		}
 
-		/* First check if the entry is already handled */
-		if (cursor.pfn < frag_start) {
-			cursor.entry->huge = true;
-			amdgpu_vm_pt_next(adev, &cursor);
-			continue;
-		}
-
 		/* If it isn't already handled it can't be a huge page */
 		if (cursor.entry->huge) {
 			/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			}
 		} while (frag_start < entry_end);
 
-		if (frag >= shift)
+		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+			/* Mark all child entries as huge */
+			while (cursor.pfn < frag_start) {
+				cursor.entry->huge = true;
+				amdgpu_vm_pt_next(adev, &cursor);
+			}
+
+		} else if (frag >= shift) {
+			/* or just move on to the next on the same level. */
 			amdgpu_vm_pt_next(adev, &cursor);
+		}
 	}
 
 	return 0;
-- cgit

From 69756c6ff0de478c10100481f16c966dde3b5339 Mon Sep 17 00:00:00 2001
From: Nicholas Kazlauskas
Date: Thu, 15 Nov 2018 17:19:12 -0500
Subject: drm/amdgpu: Add amdgpu "max bpc" connector property (v2)

[Why]
Many panels support more than 8bpc but some modes are unavailable while
running at greater than 8bpc due to DP/HDMI bandwidth constraints.

Support for more than 8bpc was added recently in the driver but it
defaults to the maximum supported bpc - locking out these modes.

This should be a user-configurable option such that the user can select
what bpc configuration they would like.

[How]
This patch introduces the "max bpc" amdgpu driver-specific connector
property so the user can limit the maximum bpc. It ranges from 8 to 16.

This doesn't directly set the preferred bpc for the panel since it
follows Intel's existing driver conventions.

This property should be removed once common drm support for max bpc lands.
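Illustrative sketch (not part of the original patch): drm_property_create_range() only creates the property object; each connector still has to attach it, and that attach side lives in the display code rather than in the hunk below. The usual create-and-attach pairing looks roughly like this, with the default value of 8 chosen here purely for illustration:

	struct drm_property *prop;

	/* create the driver-private range property once per device */
	prop = drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
	if (!prop)
		return -ENOMEM;
	adev->mode_info.max_bpc_property = prop;

	/* later, when a connector is constructed, expose it to userspace */
	drm_object_attach_property(&connector->base,
				   adev->mode_info.max_bpc_property, 8);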
v2: rebase on upstream (Alex) Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 2 ++ 2 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6748cd7fc129..686a26de50f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); + if (amdgpu_device_has_dc_support(adev)) { + adev->mode_info.max_bpc_property = + drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); + if (!adev->mode_info.max_bpc_property) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb7..d1b4d9b6aae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -339,6 +339,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* maximum number of bits per channel for monitor color */ + struct drm_property *max_bpc_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; -- cgit From 919a52fc4ca137c871f295224507fa3401e08472 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 20 Jul 2018 11:37:25 -0400 Subject: drm/amdgpu: Fix oops when pp_funcs->switch_power_profile is unset On Vega20 and other pre-production GPUs, powerplay is not enabled yet. Check for NULL pointers before calling pp_funcs function pointers. Also affects Kaveri. CC: Joerg Roedel Signed-off-by: Felix Kuehling Reviewed-by: Alex Deucher Tested-by: Joerg Roedel Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index c31a8849e9f8..1580ec60b89f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_dpm_switch_power_profile(adev, - PP_SMC_POWER_PROFILE_COMPUTE, !idle); + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->switch_power_profile) + amdgpu_dpm_switch_power_profile(adev, + PP_SMC_POWER_PROFILE_COMPUTE, + !idle); } bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) -- cgit From 8d4d7c58994759bbd9f4fec32d88bf0e0b89302e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 19 Nov 2018 12:55:12 +0100 Subject: drm/amdgpu: Add missing firmware entry for HAINAN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to lack of MODULE_FIRMWARE() with hainan_mc.bin, the driver doesn't work properly in initrd. Let's add it. 
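Illustrative aside (not part of the original patch): initramfs generators such as dracut decide which firmware blobs to bundle by scanning the firmware aliases a module declares, so a blob that is requested at runtime but never declared tends to be missing from the initrd. A minimal, generic example of the pairing (the helper name is hypothetical):

	#include <linux/module.h>
	#include <linux/firmware.h>

	/* declare the blob so it shows up in modinfo and gets copied into the initrd */
	MODULE_FIRMWARE("amdgpu/hainan_mc.bin");

	static int example_request_mc(struct device *dev, const struct firmware **fw)
	{
		/* the runtime request only succeeds if the blob is actually available */
		return request_firmware(fw, "amdgpu/hainan_mc.bin", dev);
	}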
Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=1116239
Fixes: 8eaf2b1faaf4 ("drm/amdgpu: switch firmware path for SI parts")
Cc:
Reviewed-by: Christian König
Signed-off-by: Takashi Iwai
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e9c7b2..73ad02aea2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK 0xf0000000
-- cgit

From a5d0f4565996e5595a10cb57b3d1e3d74379c502 Mon Sep 17 00:00:00 2001
From: Kenneth Feng
Date: Mon, 19 Nov 2018 14:49:16 +0800
Subject: drm/amdgpu: Enable HDP memory light sleep

Due to the register name and setting change of HDP memory light sleep
on Vega20, change the driver accordingly.

Signed-off-by: Kenneth Feng
Reviewed-by: Evan Quan
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 39 +++++++++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu')

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bf5e6a413dee..4cc0dcb1a187 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL	0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0
 
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL	0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
 /*
  * Indirect registers accessor
  */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
 {
 	uint32_t def, data;
 
-	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+	if (adev->asic_type == CHIP_VEGA20) {
+		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
 
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-	else
-		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+		else
+			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
 
-	if (def != data)
-		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+		if (def != data)
+			WREG32(SOC15_REG_OFFSET(HDP, 0, 
mmHDP_MEM_POWER_LS), data); + } } static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) -- cgit
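Illustrative sketch (not part of the original patch): both branches of the update helper above keep the usual read-modify-write-only-if-changed pattern and differ only in which register and enable bits they touch; factored out, the shape is roughly the following, with all register and mask names taken from the patch:

	static void hdp_update_light_sleep_sketch(struct amdgpu_device *adev, bool enable)
	{
		u32 reg, mask, def, data;

		if (adev->asic_type == CHIP_VEGA20) {
			/* Vega20 renamed the register and split the enable bits */
			reg  = SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL);
			mask = HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
			       HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
			       HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
			       HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		} else {
			reg  = SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS);
			mask = HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		}

		def = data = RREG32(reg);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= mask;
		else
			data &= ~mask;

		/* skip the register write when nothing changed */
		if (def != data)
			WREG32(reg, data);
	}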