author	Shuicheng Lin <shuicheng.lin@intel.com>	2025-09-11 03:14:06 +0000
committer	Matt Roper <matthew.d.roper@intel.com>	2025-09-17 09:50:44 -0700
commit	5959c4da17e01782ad09b226802ac1f1ef157a70
tree	8959225f8ffe72e1184bd1f7ce7326cf54fd635c
parent	5bb5258e357eb79719d1c998731b932c945cb52c
drm/xe: Misc refine for svm
These changes should have no functional impact.

1. Correct typo of "operation" in macro range_debug().
2. Combine 2 spin_lock() calls in xe_svm_garbage_collector() into 1.
3. Drop the redundant preferred_region_is_vram check in
   xe_svm_range_needs_migrate_to_vram().
4. Combine the devmem_possible checks in xe_svm_handle_pagefault().
   need_vram includes the IS_DGFX() check, so there is no change for
   .devmem_only.

v2: revert !ctx.devmem_only change (Matt)
v3: rebase code and refine commit message.
v4: rebase code and refine commit message.

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://lore.kernel.org/r/20250911031405.1371812-2-shuicheng.lin@intel.com
-rw-r--r--	drivers/gpu/drm/xe/xe_svm.c	25
1 file changed, 11 insertions(+), 14 deletions(-)
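
As context for change 2 before reading the diff: the patch moves the spin_lock() into the top of the for (;;) loop so there is a single lock call site instead of one lock before the loop plus a second lock at the bottom of each iteration. A minimal standalone sketch of that shape, using a pthread mutex and a hypothetical node list in place of the driver's spinlock and range list (none of these names are from the driver):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical node type standing in for xe_svm_range. */
struct node {
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/*
 * Drain the list with a single lock call site: the lock is taken at
 * the top of each iteration.  As in the patched function, the loop
 * exits via break with the lock held, so the unlock after the loop
 * pairs with the lock taken in the final iteration.
 */
static void drain(void (*process)(struct node *))
{
	struct node *n;

	for (;;) {
		pthread_mutex_lock(&list_lock);
		n = head;
		if (!n)
			break;	/* exit with the lock held; unlocked below */
		head = n->next;
		pthread_mutex_unlock(&list_lock);

		process(n);	/* runs without the lock held */
		free(n);
	}
	pthread_mutex_unlock(&list_lock);
}
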
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 86e7c72c89f1..7f2f1f041f1d 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -50,11 +50,11 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
 	return gpusvm_to_vm(r->gpusvm);
 }
 
-#define range_debug(r__, operaton__) \
+#define range_debug(r__, operation__) \
 	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
 	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
 	       "start=0x%014lx, end=0x%014lx, size=%lu", \
-	       (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
+	       (operation__), range_to_vm(&(r__)->base)->usm.asid, \
 	       (r__)->base.gpusvm, \
 	       xe_svm_range_in_vram((r__)) ? 1 : 0, \
 	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
@@ -347,8 +347,8 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
 	if (xe_vm_is_closed_or_banned(vm))
 		return -ENOENT;
 
-	spin_lock(&vm->svm.garbage_collector.lock);
 	for (;;) {
+		spin_lock(&vm->svm.garbage_collector.lock);
 		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
						 typeof(*range),
						 garbage_collector_link);
@@ -377,8 +377,6 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
 			else
 				return err;
 		}
-
-		spin_lock(&vm->svm.garbage_collector.lock);
 	}
 
 	spin_unlock(&vm->svm.garbage_collector.lock);
@@ -944,12 +942,12 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
 
 	xe_assert(vm->xe, IS_DGFX(vm->xe));
 
-	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+	if (xe_svm_range_in_vram(range)) {
 		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
 		return false;
 	}
 
-	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
 		return false;
 	}
@@ -1010,15 +1008,14 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 				     struct xe_gt *gt, u64 fault_addr,
 				     bool need_vram)
 {
+	int devmem_possible = IS_DGFX(vm->xe) &&
+		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
 	struct drm_gpusvm_ctx ctx = {
 		.read_only = xe_vma_read_only(vma),
-		.devmem_possible = IS_DGFX(vm->xe) &&
-			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
-		.check_pages_threshold = IS_DGFX(vm->xe) &&
-			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
-		.devmem_only = need_vram && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
-		.timeslice_ms = need_vram && IS_DGFX(vm->xe) &&
-			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+		.devmem_possible = devmem_possible,
+		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
+		.devmem_only = need_vram && devmem_possible,
+		.timeslice_ms = need_vram && devmem_possible ?
 			vm->xe->atomic_svm_timeslice_ms : 0,
 	};
 	struct xe_validation_ctx vctx;
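
The .devmem_only reasoning in change 4 (need_vram already includes the IS_DGFX() check, so folding IS_DGFX() into devmem_possible changes nothing) can be brute-force checked. A self-contained sketch with plain ints standing in for the kernel predicates; all names here are illustrative, not the driver's:

#include <assert.h>

int main(void)
{
	/* Exhaust all combinations of the three predicates. */
	for (int dgfx = 0; dgfx <= 1; dgfx++)
		for (int pagemap = 0; pagemap <= 1; pagemap++)
			for (int need_vram = 0; need_vram <= 1; need_vram++) {
				if (need_vram && !dgfx)
					continue;	/* precondition: need_vram implies IS_DGFX() */
				int devmem_possible = dgfx && pagemap;
				/* old: need_vram && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) */
				int old_devmem_only = need_vram && pagemap;
				/* new: need_vram && devmem_possible */
				int new_devmem_only = need_vram && devmem_possible;
				assert(old_devmem_only == new_devmem_only);
			}
	return 0;
}

Under the stated precondition the two expressions agree everywhere, which is why the v2 note about reverting the !ctx.devmem_only change left the final form equivalent.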