author     Alex Deucher <alexander.deucher@amd.com>   2025-04-16 14:40:25 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2025-04-22 08:51:45 -0400
commit     d13e95967ebfde85d244ea626c8b14a12bca14ac (patch)
tree       4841bc50ad5d78c0be8aa36c256bfd171cc5427f /drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
parent     36b0bc1731c82fdf2b9228fe4e86d99e5063be1b (diff)
drm/amdgpu/userq: move waiting for last fence before unmap
Need to wait for the last fence before unmapping. This also fixes a memory leak in amdgpu_userqueue_cleanup() when the fence isn't signalled.

Fixes: b0db33c8c50f ("drm/amdgpu/userq: rework front end call sequence")
Reviewed-by: Prike Liang <Prike.Liang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
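For context, below is a minimal sketch of the wait-before-teardown pattern this patch adds; the helper name and parameters are illustrative stand-ins, and the actual driver changes are in the diff that follows.

/* Minimal illustrative sketch (not the exact driver code): wait briefly on
 * the queue's last fence, log on timeout, and continue so teardown still runs. */
#include <linux/dma-fence.h>
#include <linux/device.h>
#include <linux/jiffies.h>

/* Hypothetical helper mirroring the pattern introduced by this patch. */
static void example_wait_last_fence(struct device *dev, struct dma_fence *f)
{
        long ret;

        if (f && !dma_fence_is_signaled(f)) {
                /* Interruptible wait, capped at 100 ms. */
                ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
                if (ret <= 0)
                        dev_err(dev, "Timed out waiting for fence f=%p\n", f);
                /* No early return: the caller still unmaps and frees the queue,
                 * which is what avoids the leak fixed here. */
        }
}

The design point is that a timeout is only logged rather than treated as fatal, so mqd_destroy() and amdgpu_userq_fence_driver_free() still run on every path.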
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c  24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index d821e5d57417..b75b93e69c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -84,23 +84,27 @@ amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr,
 }
 
 static void
-amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
-                         struct amdgpu_usermode_queue *queue,
-                         int queue_id)
+amdgpu_userqueue_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+                                     struct amdgpu_usermode_queue *queue)
 {
         struct amdgpu_device *adev = uq_mgr->adev;
-        const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
         struct dma_fence *f = queue->last_fence;
         int ret;
 
         if (f && !dma_fence_is_signaled(f)) {
                 ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
-                if (ret <= 0) {
-                        DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
-                                  f->context, f->seqno);
-                        return;
-                }
+                if (ret <= 0)
+                        dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f);
         }
+}
+
+static void
+amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+                         struct amdgpu_usermode_queue *queue,
+                         int queue_id)
+{
+        struct amdgpu_device *adev = uq_mgr->adev;
+        const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
         uq_funcs->mqd_destroy(uq_mgr, queue);
         amdgpu_userq_fence_driver_free(queue);
@@ -305,6 +309,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
                 mutex_unlock(&uq_mgr->userq_mutex);
                 return -EINVAL;
         }
+        amdgpu_userqueue_wait_for_last_fence(uq_mgr, queue);
         r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
         amdgpu_bo_unpin(queue->db_obj.obj);
         amdgpu_bo_unref(&queue->db_obj.obj);
@@ -780,6 +785,7 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
         mutex_lock(&userq_mgr->userq_mutex);
         idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+                amdgpu_userqueue_wait_for_last_fence(userq_mgr, queue);
                 amdgpu_userqueue_unmap_helper(userq_mgr, queue);
                 amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id);
         }