Merge drm/drm-next into drm-intel-gt-next
drivers/gpu/drm/i915/i915_vma.c
index c0d6d5526abe614d1caa0503a9c2221be290154f..30307e34d2dcb2df361a3b7820c1d0f3462ac180 100644 (file)
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
+#include "i915_vma_resource.h"
+
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+       /*
+        * We may be forced to unbind when the vm is dead, to clean it up.
+        * This is the only exception to the requirement of the object lock
+        * being held.
+        */
+       if (atomic_read(&vma->vm->open))
+               assert_object_held_shared(vma->obj);
+}
 
 static struct kmem_cache *slab_vmas;
 
@@ -284,7 +296,7 @@ struct i915_vma_work {
        struct dma_fence_work base;
        struct i915_address_space *vm;
        struct i915_vm_pt_stash stash;
-       struct i915_vma *vma;
+       struct i915_vma_resource *vma_res;
        struct drm_i915_gem_object *pinned;
        struct i915_sw_dma_fence_cb cb;
        enum i915_cache_level cache_level;
@@ -294,23 +306,24 @@ struct i915_vma_work {
 static void __vma_bind(struct dma_fence_work *work)
 {
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
-       struct i915_vma *vma = vw->vma;
+       struct i915_vma_resource *vma_res = vw->vma_res;
+
+       vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
+                              vma_res, vw->cache_level, vw->flags);
 
-       vma->ops->bind_vma(vw->vm, &vw->stash,
-                          vma, vw->cache_level, vw->flags);
 }
 
 static void __vma_release(struct dma_fence_work *work)
 {
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
 
-       if (vw->pinned) {
-               __i915_gem_object_unpin_pages(vw->pinned);
+       if (vw->pinned)
                i915_gem_object_put(vw->pinned);
-       }
 
        i915_vm_free_pt_stash(vw->vm, &vw->stash);
        i915_vm_put(vw->vm);
+       if (vw->vma_res)
+               i915_vma_resource_put(vw->vma_res);
 }
 
 static const struct dma_fence_work_ops bind_ops = {
@@ -374,12 +387,27 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
 #define i915_vma_verify_bind_complete(_vma) 0
 #endif
 
+I915_SELFTEST_EXPORT void
+i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+                               struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
+                              obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+                              i915_gem_object_is_lmem(obj), obj->mm.region,
+                              vma->ops, vma->private, vma->node.start,
+                              vma->node.size, vma->size);
+}
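
As a quick orientation for the new @vma_res parameter, here is a minimal caller-side sketch (an assumption-flagged illustration, not code from this patch) of the ownership rule documented below: the preallocated resource is handed to i915_vma_bind(), which either consumes or frees it, so the caller must not touch it afterwards. It mirrors the i915_vma_pin_ww() changes further down; example_bind() itself is hypothetical.

/* Sketch only: allocate before taking vm->mutex, then hand off ownership. */
static int example_bind(struct i915_vma *vma, u32 flags)
{
        struct i915_vma_resource *vma_res;
        int err;

        vma_res = i915_vma_resource_alloc();
        if (IS_ERR(vma_res))
                return PTR_ERR(vma_res);

        err = mutex_lock_interruptible(&vma->vm->mutex);
        if (err) {
                i915_vma_resource_free(vma_res); /* still ours on error */
                return err;
        }

        err = i915_vma_bind(vma, vma->obj->cache_level, flags, NULL, vma_res);
        vma_res = NULL; /* consumed or freed by i915_vma_bind() */
        mutex_unlock(&vma->vm->mutex);

        return err;
}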
+
 /**
  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
  * @vma: VMA to map
  * @cache_level: mapping cache level
  * @flags: flags like global or local mapping
  * @work: preallocated worker for allocating and binding the PTE
+ * @vma_res: pointer to a preallocated vma resource. The resource is either
+ * consumed or freed.
  *
  * DMA addresses are taken from the scatter-gather table of this object (or of
  * this VMA in case of non-default GGTT views) and PTE entries set up.
@@ -388,10 +416,12 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma,
                  enum i915_cache_level cache_level,
                  u32 flags,
-                 struct i915_vma_work *work)
+                 struct i915_vma_work *work,
+                 struct i915_vma_resource *vma_res)
 {
        u32 bind_flags;
        u32 vma_flags;
+       int ret;
 
        lockdep_assert_held(&vma->vm->mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -399,11 +429,15 @@ int i915_vma_bind(struct i915_vma *vma,
 
        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
-                                             vma->vm->total)))
+                                             vma->vm->total))) {
+               i915_vma_resource_free(vma_res);
                return -ENODEV;
+       }
 
-       if (GEM_DEBUG_WARN_ON(!flags))
+       if (GEM_DEBUG_WARN_ON(!flags)) {
+               i915_vma_resource_free(vma_res);
                return -EINVAL;
+       }
 
        bind_flags = flags;
        bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
@@ -412,16 +446,44 @@ int i915_vma_bind(struct i915_vma *vma,
        vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
        bind_flags &= ~vma_flags;
-       if (bind_flags == 0)
+       if (bind_flags == 0) {
+               i915_vma_resource_free(vma_res);
                return 0;
+       }
 
        GEM_BUG_ON(!atomic_read(&vma->pages_count));
 
+       /* Wait for or await async unbinds touching our range */
+       if (work && bind_flags & vma->vm->bind_async_flags)
+               ret = i915_vma_resource_bind_dep_await(vma->vm,
+                                                      &work->base.chain,
+                                                      vma->node.start,
+                                                      vma->node.size,
+                                                      true,
+                                                      GFP_NOWAIT |
+                                                      __GFP_RETRY_MAYFAIL |
+                                                      __GFP_NOWARN);
+       else
+               ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
+                                                     vma->node.size, true);
+       if (ret) {
+               i915_vma_resource_free(vma_res);
+               return ret;
+       }
+
+       if (vma->resource || !vma_res) {
+               /* Rebinding with an additional I915_VMA_*_BIND */
+               GEM_WARN_ON(!vma_flags);
+               i915_vma_resource_free(vma_res);
+       } else {
+               i915_vma_resource_init_from_vma(vma_res, vma);
+               vma->resource = vma_res;
+       }
        trace_i915_vma_bind(vma, bind_flags);
        if (work && bind_flags & vma->vm->bind_async_flags) {
                struct dma_fence *prev;
 
-               work->vma = vma;
+               work->vma_res = i915_vma_resource_get(vma->resource);
                work->cache_level = cache_level;
                work->flags = bind_flags;
 
@@ -444,17 +506,25 @@ int i915_vma_bind(struct i915_vma *vma,
 
                work->base.dma.error = 0; /* enable the queue_work() */
 
-               __i915_gem_object_pin_pages(vma->obj);
-               work->pinned = i915_gem_object_get(vma->obj);
+               /*
+                * If we don't have the refcounted pages list, keep a reference
+                * on the object to avoid waiting for the async bind to
+                * complete in the object destruction path.
+                */
+               if (!work->vma_res->bi.pages_rsgt)
+                       work->pinned = i915_gem_object_get(vma->obj);
        } else {
                if (vma->obj) {
-                       int ret;
-
                        ret = i915_gem_object_wait_moving_fence(vma->obj, true);
-                       if (ret)
+                       if (ret) {
+                               i915_vma_resource_free(vma->resource);
+                               vma->resource = NULL;
+
                                return ret;
+                       }
                }
-               vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
+               vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
+                                  bind_flags);
        }
 
        if (vma->obj)
@@ -654,7 +724,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
  * 0 on success, negative error code otherwise.
  */
 static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+               u64 size, u64 alignment, u64 flags)
 {
        unsigned long color;
        u64 start, end;
@@ -706,7 +777,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                    range_overflows(offset, size, end))
                        return -EINVAL;
 
-               ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+               ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
                                           size, offset, color,
                                           flags);
                if (ret)
@@ -745,7 +816,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }
 
-               ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+               ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
                                          size, alignment, color,
                                          start, end, flags);
                if (ret)
@@ -779,9 +850,17 @@ i915_vma_detach(struct i915_vma *vma)
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 {
        unsigned int bound;
-       bool pinned = true;
 
        bound = atomic_read(&vma->flags);
+
+       if (flags & PIN_VALIDATE) {
+               flags &= I915_VMA_BIND_MASK;
+
+               return (flags & bound) == flags;
+       }
+
+       /* with the lock mandatory for unbind, we don't race here */
+       flags &= I915_VMA_BIND_MASK;
        do {
                if (unlikely(flags & ~bound))
                        return false;
@@ -789,34 +868,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
                        return false;
 
-               if (!(bound & I915_VMA_PIN_MASK))
-                       goto unpinned;
-
                GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
 
        return true;
-
-unpinned:
-       /*
-        * If pin_count==0, but we are bound, check under the lock to avoid
-        * racing with a concurrent i915_vma_unbind().
-        */
-       mutex_lock(&vma->vm->mutex);
-       do {
-               if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
-                       pinned = false;
-                       break;
-               }
-
-               if (unlikely(flags & ~bound)) {
-                       pinned = false;
-                       break;
-               }
-       } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
-       mutex_unlock(&vma->vm->mutex);
-
-       return pinned;
 }
 
 static struct scatterlist *
@@ -1100,7 +1155,6 @@ static int
 __i915_vma_get_pages(struct i915_vma *vma)
 {
        struct sg_table *pages;
-       int ret;
 
        /*
         * The vma->pages are only valid within the lifespan of the borrowed
@@ -1133,18 +1187,16 @@ __i915_vma_get_pages(struct i915_vma *vma)
                break;
        }
 
-       ret = 0;
        if (IS_ERR(pages)) {
-               ret = PTR_ERR(pages);
-               pages = NULL;
                drm_err(&vma->vm->i915->drm,
-                       "Failed to get pages for VMA view type %u (%d)!\n",
-                       vma->ggtt_view.type, ret);
+                       "Failed to get pages for VMA view type %u (%ld)!\n",
+                       vma->ggtt_view.type, PTR_ERR(pages));
+               return PTR_ERR(pages);
        }
 
        vma->pages = pages;
 
-       return ret;
+       return 0;
 }
 
 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
@@ -1176,25 +1228,14 @@ err_unpin:
 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 {
        /* We allocate under vma_get_pages, so beware the shrinker */
-       struct sg_table *pages = READ_ONCE(vma->pages);
-
        GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
 
        if (atomic_sub_return(count, &vma->pages_count) == 0) {
-               /*
-                * The atomic_sub_return is a read barrier for the READ_ONCE of
-                * vma->pages above.
-                *
-                * READ_ONCE is safe because this is either called from the same
-                * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
-                *
-                * TODO: We're leaving vma->pages dangling, until vma->obj->resv
-                * lock is required.
-                */
-               if (pages != vma->obj->mm.pages) {
-                       sg_free_table(pages);
-                       kfree(pages);
+               if (vma->pages != vma->obj->mm.pages) {
+                       sg_free_table(vma->pages);
+                       kfree(vma->pages);
                }
+               vma->pages = NULL;
 
                i915_gem_object_unpin_pages(vma->obj);
        }
@@ -1227,6 +1268,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 {
        struct i915_vma_work *work = NULL;
        struct dma_fence *moving = NULL;
+       struct i915_vma_resource *vma_res = NULL;
        intel_wakeref_t wakeref = 0;
        unsigned int bound;
        int err;
@@ -1240,7 +1282,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
 
        /* First try and grab the pin without rebinding the vma */
-       if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+       if (try_qad_pin(vma, flags))
                return 0;
 
        err = i915_vma_get_pages(vma);
@@ -1281,6 +1323,12 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                }
        }
 
+       vma_res = i915_vma_resource_alloc();
+       if (IS_ERR(vma_res)) {
+               err = PTR_ERR(vma_res);
+               goto err_fence;
+       }
+
        /*
         * Differentiate between user/kernel vma inside the aliasing-ppgtt.
         *
@@ -1301,7 +1349,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        err = mutex_lock_interruptible_nested(&vma->vm->mutex,
                                              !(flags & PIN_GLOBAL));
        if (err)
-               goto err_fence;
+               goto err_vma_res;
 
        /* No more allocations allowed now we hold vm->mutex */
 
@@ -1322,7 +1370,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        }
 
        if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
-               __i915_vma_pin(vma);
+               if (!(flags & PIN_VALIDATE))
+                       __i915_vma_pin(vma);
                goto err_unlock;
        }
 
@@ -1331,7 +1380,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                goto err_unlock;
 
        if (!(bound & I915_VMA_BIND_MASK)) {
-               err = i915_vma_insert(vma, size, alignment, flags);
+               err = i915_vma_insert(vma, ww, size, alignment, flags);
                if (err)
                        goto err_active;
 
@@ -1342,7 +1391,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        GEM_BUG_ON(!vma->pages);
        err = i915_vma_bind(vma,
                            vma->obj->cache_level,
-                           flags, work);
+                           flags, work, vma_res);
+       vma_res = NULL;
        if (err)
                goto err_remove;
 
@@ -1351,8 +1401,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 
-       __i915_vma_pin(vma);
-       GEM_BUG_ON(!i915_vma_is_pinned(vma));
+       if (!(flags & PIN_VALIDATE)) {
+               __i915_vma_pin(vma);
+               GEM_BUG_ON(!i915_vma_is_pinned(vma));
+       }
        GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 
@@ -1365,6 +1417,8 @@ err_active:
        i915_active_release(&vma->active);
 err_unlock:
        mutex_unlock(&vma->vm->mutex);
+err_vma_res:
+       i915_vma_resource_free(vma_res);
 err_fence:
        if (work)
                dma_fence_work_commit_imm(&work->base);
@@ -1411,7 +1465,12 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                /* Unlike i915_vma_pin, we don't take no for an answer! */
                flush_idle_contexts(vm->gt);
                if (mutex_lock_interruptible(&vm->mutex) == 0) {
-                       i915_gem_evict_vm(vm);
+                       /*
+                        * We pass NULL ww here, as we don't want to unbind
+                        * locked objects when called from execbuf when pinning
+                        * is removed. This would probably regress badly.
+                        */
+                       i915_gem_evict_vm(vm, NULL);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
@@ -1515,6 +1574,7 @@ void i915_vma_release(struct kref *ref)
        i915_vm_put(vma->vm);
 
        i915_active_fini(&vma->active);
+       GEM_WARN_ON(vma->resource);
        i915_vma_free(vma);
 }
 
@@ -1547,8 +1607,16 @@ void i915_vma_parked(struct intel_gt *gt)
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;
 
-               INIT_LIST_HEAD(&vma->closed_link);
-               __i915_vma_put(vma);
+               if (i915_gem_object_trylock(obj, NULL)) {
+                       INIT_LIST_HEAD(&vma->closed_link);
+                       __i915_vma_put(vma);
+                       i915_gem_object_unlock(obj);
+               } else {
+                       /* back you go.. */
+                       spin_lock_irq(&gt->closed_lock);
+                       list_add(&vma->closed_link, &gt->closed_vma);
+                       spin_unlock_irq(&gt->closed_lock);
+               }
 
                i915_gem_object_put(obj);
                i915_vm_close(vm);
@@ -1599,8 +1667,6 @@ static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *
 {
        int err;
 
-       GEM_BUG_ON(!i915_vma_is_pinned(vma));
-
        /* Wait for the vma to be bound before we start! */
        err = __i915_request_await_bind(rq, vma);
        if (err)
@@ -1619,6 +1685,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
 
        assert_object_held(obj);
 
+       GEM_BUG_ON(!vma->pages);
+
        err = __i915_vma_move_to_active(vma, rq);
        if (unlikely(err))
                return err;
@@ -1661,9 +1729,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
        return 0;
 }
 
-void __i915_vma_evict(struct i915_vma *vma)
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 {
+       struct i915_vma_resource *vma_res = vma->resource;
+       struct dma_fence *unbind_fence;
+
        GEM_BUG_ON(i915_vma_is_pinned(vma));
+       assert_vma_held_evict(vma);
 
        if (i915_vma_is_map_and_fenceable(vma)) {
                /* Force a pagefault for domain tracking on next user access */
@@ -1693,15 +1765,36 @@ void __i915_vma_evict(struct i915_vma *vma)
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));
 
-       if (likely(atomic_read(&vma->vm->open))) {
-               trace_i915_vma_unbind(vma);
-               vma->ops->unbind_vma(vma->vm, vma);
-       }
+       /* Object backend must be async capable. */
+       GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
+
+       /* If vm is not open, unbind is a nop. */
+       vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
+               atomic_read(&vma->vm->open);
+       trace_i915_vma_unbind(vma);
+
+       unbind_fence = i915_vma_resource_unbind(vma_res);
+       vma->resource = NULL;
+
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);
 
        i915_vma_detach(vma);
+
+       if (!async && unbind_fence) {
+               dma_fence_wait(unbind_fence, false);
+               dma_fence_put(unbind_fence);
+               unbind_fence = NULL;
+       }
+
+       /*
+        * Binding itself may not have completed until the unbind fence signals,
+        * so don't drop the pages until that happens, unless the resource is
+        * async_capable.
+        */
+
        vma_unbind_pages(vma);
+       return unbind_fence;
 }
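
For clarity, a caller-side sketch (not from the patch) of the new return contract: with async == false the unbind fence is waited on and dropped internally and NULL is returned, while with async == true the caller owns a fence reference and is expected to publish and then drop it, as the async unbind path added below does. example_evict() is hypothetical; it assumes vm->mutex and the object's dma-resv lock are held, with a shared-fence slot reserved via dma_resv_reserve_shared().

static void example_evict(struct i915_vma *vma, bool async)
{
        struct dma_fence *fence;

        fence = __i915_vma_evict(vma, async);
        if (fence) {
                /* Async case: publish the unbind fence, then drop our ref. */
                dma_resv_add_shared_fence(vma->obj->base.resv, fence);
                dma_fence_put(fence);
        }
        /* Sync case: __i915_vma_evict() already waited; fence is NULL. */
}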
 
 int __i915_vma_unbind(struct i915_vma *vma)
@@ -1709,6 +1802,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
        int ret;
 
        lockdep_assert_held(&vma->vm->mutex);
+       assert_vma_held_evict(vma);
 
        if (!drm_mm_node_allocated(&vma->node))
                return 0;
@@ -1728,18 +1822,55 @@ int __i915_vma_unbind(struct i915_vma *vma)
                return ret;
 
        GEM_BUG_ON(i915_vma_is_active(vma));
-       __i915_vma_evict(vma);
+       __i915_vma_evict(vma, false);
 
        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
        return 0;
 }
 
+static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
+{
+       struct dma_fence *fence;
+
+       lockdep_assert_held(&vma->vm->mutex);
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return NULL;
+
+       if (i915_vma_is_pinned(vma) ||
+           &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
+               return ERR_PTR(-EAGAIN);
+
+       /*
+        * We probably need to replace this with awaiting the fences of the
+        * object's dma_resv when the vma active goes away. When doing that
+        * we need to be careful to not add the vma_resource unbind fence
+        * immediately to the object's dma_resv, because then unbinding
+        * the next vma from the object, in case there are many, will
+        * actually await the unbinding of the previous vmas, which is
+        * undesirable.
+        */
+       if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
+                                      I915_ACTIVE_AWAIT_EXCL |
+                                      I915_ACTIVE_AWAIT_ACTIVE) < 0) {
+               return ERR_PTR(-EBUSY);
+       }
+
+       fence = __i915_vma_evict(vma, true);
+
+       drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+
+       return fence;
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
        struct i915_address_space *vm = vma->vm;
        intel_wakeref_t wakeref = 0;
        int err;
 
+       assert_object_held_shared(vma->obj);
+
        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
@@ -1770,6 +1901,79 @@ out_rpm:
        return err;
 }
 
+int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+       struct i915_address_space *vm = vma->vm;
+       intel_wakeref_t wakeref = 0;
+       struct dma_fence *fence;
+       int err;
+
+       /*
+        * We need the dma-resv lock since we add the
+        * unbind fence to the dma-resv object.
+        */
+       assert_object_held(obj);
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return 0;
+
+       if (i915_vma_is_pinned(vma)) {
+               vma_print_allocator(vma, "is pinned");
+               return -EAGAIN;
+       }
+
+       if (!obj->mm.rsgt)
+               return -EBUSY;
+
+       err = dma_resv_reserve_shared(obj->base.resv, 1);
+       if (err)
+               return -EBUSY;
+
+       /*
+        * It would be great if we could grab this wakeref from the
+        * async unbind work if needed, but we can't because it uses
+        * kmalloc and it's in the dma-fence signalling critical path.
+        */
+       if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+               wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+       if (trylock_vm && !mutex_trylock(&vm->mutex)) {
+               err = -EBUSY;
+               goto out_rpm;
+       } else if (!trylock_vm) {
+               err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
+               if (err)
+                       goto out_rpm;
+       }
+
+       fence = __i915_vma_unbind_async(vma);
+       mutex_unlock(&vm->mutex);
+       if (IS_ERR_OR_NULL(fence)) {
+               err = PTR_ERR_OR_ZERO(fence);
+               goto out_rpm;
+       }
+
+       dma_resv_add_shared_fence(obj->base.resv, fence);
+       dma_fence_put(fence);
+
+out_rpm:
+       if (wakeref)
+               intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+       return err;
+}
+
+int i915_vma_unbind_unlocked(struct i915_vma *vma)
+{
+       int err;
+
+       i915_gem_object_lock(vma->obj, NULL);
+       err = i915_vma_unbind(vma);
+       i915_gem_object_unlock(vma->obj);
+
+       return err;
+}
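
For orientation, a hedged summary of the unbind entry points after this change (a reading of the code above, not text from the patch):

/*
 * i915_vma_unbind(vma)          - blocking; the caller holds the object's
 *                                 dma-resv lock (now asserted above).
 * i915_vma_unbind_async(vma, t) - does not wait for the unbind to complete;
 *                                 the caller holds the dma-resv lock and the
 *                                 unbind fence is added to the object's
 *                                 dma-resv as a shared fence.
 * i915_vma_unbind_unlocked(vma) - blocking; takes and drops the object lock
 *                                 itself, for callers that do not already
 *                                 hold it.
 */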
+
 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
 {
        i915_gem_object_make_unshrinkable(vma->obj);