summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSunil Khatri <sunil.khatri@amd.com>2025-04-22 18:40:03 +0530
committerAlex Deucher <alexander.deucher@amd.com>2025-05-05 13:29:33 -0400
commitc46a37628a274ff557e6c1f45e6c00c3c6db60df (patch)
tree4e981be1c2b6c8b9ff624ccd762846da22b07994
parent8c97cdb1a692eafcf5b6e8fc7e7ecc7eff28984e (diff)
drm/amdgpu: change DRM_ERROR to drm_file_err in amdgpu_userq.c
Change the DRM_ERROR and drm_err calls to drm_file_err to add the process name and pid to the logging. Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c63
1 file changed, 32 insertions, 31 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 8b1323babb96..854577d3c37c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -179,25 +179,25 @@ int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
if (r) {
- DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
return r;
}
r = amdgpu_bo_reserve(userq_obj->obj, true);
if (r) {
- DRM_ERROR("Failed to reserve BO to map (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
goto free_obj;
}
r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
if (r) {
- DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
goto unresv;
}
r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
if (r) {
- DRM_ERROR("Failed to map BO for userqueue (%d)", r);
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
goto unresv;
}
@@ -233,7 +233,7 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
if (gobj == NULL) {
- DRM_ERROR("Can't find GEM object for doorbell\n");
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
return -EINVAL;
}
@@ -243,13 +243,13 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
/* Pin the BO before generating the index, unpin in queue destroy */
r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
if (r) {
- DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
goto unref_bo;
}
r = amdgpu_bo_reserve(db_obj->obj, true);
if (r) {
- DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
goto unpin_bo;
}
@@ -271,7 +271,8 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
break;
default:
- DRM_ERROR("[Usermode queues] IP %d not support\n", db_info->queue_type);
+ drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
+ db_info->queue_type);
r = -EINVAL;
goto unpin_bo;
}
@@ -356,7 +357,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
args->in.ip_type != AMDGPU_HW_IP_DMA &&
args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
+ drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
return -EINVAL;
}
@@ -368,13 +370,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
(args->in.ip_type != AMDGPU_HW_IP_GFX) &&
(args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
!amdgpu_is_tmz(adev)) {
- drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
+ drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
return -EINVAL;
}
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -390,14 +392,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
uq_funcs = adev->userq_funcs[args->in.ip_type];
if (!uq_funcs) {
- DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
r = -EINVAL;
goto unlock;
}
queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
r = -ENOMEM;
goto unlock;
}
@@ -414,7 +417,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
- DRM_ERROR("Failed to get doorbell for queue\n");
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
goto unlock;
}
@@ -423,13 +426,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
r = amdgpu_userq_fence_driver_alloc(adev, queue);
if (r) {
- DRM_ERROR("Failed to alloc fence driver\n");
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
goto unlock;
}
r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
if (r) {
- DRM_ERROR("Failed to create Queue\n");
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
amdgpu_userq_fence_driver_free(queue);
kfree(queue);
goto unlock;
@@ -438,7 +441,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
if (qid < 0) {
- DRM_ERROR("Failed to allocate a queue id\n");
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -458,7 +461,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
mutex_unlock(&adev->userq_mutex);
- DRM_ERROR("Failed to map Queue\n");
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
idr_remove(&uq_mgr->userq_idr, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
@@ -490,7 +493,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
r = amdgpu_userq_create(filp, args);
if (r)
- DRM_ERROR("Failed to create usermode queue\n");
+ drm_file_err(filp, "Failed to create usermode queue\n");
break;
case AMDGPU_USERQ_OP_FREE:
@@ -508,7 +511,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
- DRM_ERROR("Failed to destroy usermode queue\n");
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
break;
default:
@@ -522,7 +525,6 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
- struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_usermode_queue *queue;
int queue_id;
int ret = 0, r;
@@ -535,7 +537,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
}
if (ret)
- dev_err(adev->dev, "Failed to map all the queues\n");
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
return ret;
}
@@ -573,7 +575,7 @@ amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
ret = amdgpu_vm_lock_pd(vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret)) {
- DRM_ERROR("Failed to lock PD\n");
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
goto unlock_all;
}
@@ -613,7 +615,7 @@ amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
bo = bo_va->base.bo;
ret = amdgpu_userq_validate_vm_bo(NULL, bo);
if (ret) {
- DRM_ERROR("Failed to validate BO\n");
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
goto unlock_all;
}
@@ -644,7 +646,7 @@ amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
- DRM_ERROR("Failed to replace eviction fence\n");
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
unlock_all:
drm_exec_fini(&exec);
@@ -663,13 +665,13 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
ret = amdgpu_userq_validate_bos(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to validate BOs to restore\n");
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
}
ret = amdgpu_userq_restore_all(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to restore all queues\n");
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
goto unlock;
}
@@ -680,7 +682,6 @@ unlock:
static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
- struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_usermode_queue *queue;
int queue_id;
int ret = 0, r;
@@ -693,7 +694,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
}
if (ret)
- dev_err(adev->dev, "Couldn't unmap all the queues\n");
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
return ret;
}
@@ -730,13 +731,13 @@ amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
/* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userq_wait_for_signal(uq_mgr);
if (ret) {
- DRM_ERROR("Not evicting userqueue, timeout waiting for work\n");
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
return;
}
ret = amdgpu_userq_evict_all(uq_mgr);
if (ret) {
- DRM_ERROR("Failed to evict userqueue\n");
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
return;
}