Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4403a93eb164..426ed08b15ad 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -840,6 +840,15 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
unsigned int bound;
bound = atomic_read(&vma->flags);
+
+ if (flags & PIN_VALIDATE) {
+ flags &= I915_VMA_BIND_MASK;
+
+ return (flags & bound) == flags;
+ }
+
+ /* with the lock mandatory for unbind, we don't race here */
+ flags &= I915_VMA_BIND_MASK;
do {
if (unlikely(flags & ~bound))
return false;
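
For reference, the validate-only fast path added in the hunk above can be read in isolation as follows. This is a minimal sketch: the flag values are assumed placeholders for illustration, not the driver's real bit definitions, and the helper name is hypothetical.

/*
 * Sketch of the PIN_VALIDATE fast path in try_qad_pin() after this change.
 * Flag values below are assumed placeholders, not the real i915 definitions.
 */
#include <stdbool.h>

#define I915_VMA_GLOBAL_BIND	(1u << 0)	/* assumed bit layout */
#define I915_VMA_LOCAL_BIND	(1u << 1)	/* assumed bit layout */
#define I915_VMA_BIND_MASK	(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
#define PIN_VALIDATE		(1u << 8)	/* assumed bit layout */

/* bound is a snapshot of vma->flags, flags is the caller's pin request */
static bool validate_only(unsigned int bound, unsigned int flags)
{
	/* only the requested bind types matter for the check */
	flags &= I915_VMA_BIND_MASK;

	/* succeed only if every requested binding is already present */
	return (flags & bound) == flags;
}
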
@@ -1261,7 +1270,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
/* First try and grab the pin without rebinding the vma */
- if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ if (try_qad_pin(vma, flags))
return 0;
err = i915_vma_get_pages(vma);
@@ -1349,7 +1358,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
- __i915_vma_pin(vma);
+ if (!(flags & PIN_VALIDATE))
+ __i915_vma_pin(vma);
goto err_unlock;
}
@@ -1379,8 +1389,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- __i915_vma_pin(vma);
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ if (!(flags & PIN_VALIDATE)) {
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ }
GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
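
The two i915_vma_pin_ww() hunks above share one idea: with PIN_VALIDATE the caller only wants to confirm that the vma is (or has just been) bound suitably, so the pin count is left untouched and the caller relies on holding the ww/object lock for stability. A hedged sketch of that decision follows; take_pin_reference() is a hypothetical helper and the bare atomic counter stands in for the driver's packed vma state.

/*
 * Illustrative sketch only: take_pin_reference() is a hypothetical helper,
 * not a driver function. With PIN_VALIDATE no reference is taken.
 */
#include <stdatomic.h>

#define PIN_VALIDATE	(1u << 8)	/* assumed bit layout */

static void take_pin_reference(atomic_uint *pin_count, unsigned int flags)
{
	if (flags & PIN_VALIDATE)
		return;	/* validation only: caller holds the lock, no pin taken */

	atomic_fetch_add_explicit(pin_count, 1, memory_order_relaxed);
}

This is consistent with the later hunks, which drop GEM_BUG_ON(!i915_vma_is_pinned(vma)) from __i915_vma_move_to_active() and instead assert that vma->pages is populated.
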
@@ -1643,8 +1655,6 @@ static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *
{
int err;
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
-
/* Wait for the vma to be bound before we start! */
err = __i915_request_await_bind(rq, vma);
if (err)
@@ -1663,6 +1673,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
assert_object_held(obj);
+ GEM_BUG_ON(!vma->pages);
+
err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;