dma-buf: add more reservation object locking wrappers
author: Christian König <christian.koenig@amd.com>
Wed, 31 Jul 2019 07:41:50 +0000 (09:41 +0200)
committer: Christian König <christian.koenig@amd.com>
Mon, 5 Aug 2019 07:28:43 +0000 (09:28 +0200)
Complete the abstraction of the ww_mutex inside the reservation object.

This allows us to add more handling and debugging to the reservation
object in the future.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/320761/
16 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/drm/ttm/ttm_bo_driver.h
include/linux/reservation.h

index e069de8b54e619fbd9a630e34ad9f757731d27ab..cce0575119b06cb8dd9fb3beab97c5bb0bcffd44 100644 (file)
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
        *map = mapping;
 
        /* Double check that the BO is reserved by this CS */
-       if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+       if (reservation_object_locking_ctx((*bo)->tbo.resv) != &parser->ticket)
                return -EINVAL;
 
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
index 02cd845e77b33e812c0f411af5ede40ad5606e00..344f277b54f008b45677f46af539f807b485d2a0 100644 (file)
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
        bp.flags = 0;
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
-       ww_mutex_lock(&resv->lock, NULL);
+       reservation_object_lock(resv, NULL);
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;
 
-       ww_mutex_unlock(&resv->lock);
+       reservation_object_unlock(resv);
        return &bo->gem_base;
 
 error:
-       ww_mutex_unlock(&resv->lock);
+       reservation_object_unlock(resv);
        return ERR_PTR(ret);
 }
 
index 19ec775b7aa8bc486666764df0baed449b46ffc5..e352aa2cc28be63f27a9c893eba520761449a611 100644 (file)
@@ -546,7 +546,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
        if (!bp->resv)
-               ww_mutex_unlock(&bo->tbo.resv->lock);
+               reservation_object_unlock(bo->tbo.resv);
        amdgpu_bo_unref(&bo);
        return r;
 }
@@ -1089,7 +1089,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-       lockdep_assert_held(&bo->tbo.resv->lock.base);
+       reservation_object_assert_held(bo->tbo.resv);
 
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
@@ -1330,7 +1330,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+       WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.resv) &&
                     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
index 24c3c05e2fb7d70121b03f7ec111de018c67e96a..f72d3625e162cc275c84696e367a9337d845877c 100644 (file)
@@ -2416,7 +2416,8 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
                        struct amdgpu_bo *bo;
 
                        bo = mapping->bo_va->base.bo;
-                       if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+                       if (reservation_object_locking_ctx(bo->tbo.resv) !=
+                           ticket)
                                continue;
                }
 
index 243f43d70f42c7ab00e0d161172df18b7b8749b1..afc38cece3f539e80a514f899775c373b8564ca6 100644 (file)
@@ -1288,8 +1288,8 @@ retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];
 
-               ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
-                                                      acquire_ctx);
+               ret = reservation_object_lock_slow_interruptible(obj->resv,
+                                                                acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
@@ -1300,16 +1300,16 @@ retry:
                if (i == contended)
                        continue;
 
-               ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
-                                                 acquire_ctx);
+               ret = reservation_object_lock_interruptible(objs[i]->resv,
+                                                           acquire_ctx);
                if (ret) {
                        int j;
 
                        for (j = 0; j < i; j++)
-                               ww_mutex_unlock(&objs[j]->resv->lock);
+                               reservation_object_unlock(objs[j]->resv);
 
                        if (contended != -1 && contended >= i)
-                               ww_mutex_unlock(&objs[contended]->resv->lock);
+                               reservation_object_unlock(objs[contended]->resv);
 
                        if (ret == -EDEADLK) {
                                contended = i;
@@ -1334,7 +1334,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
        int i;
 
        for (i = 0; i < count; i++)
-               ww_mutex_unlock(&objs[i]->resv->lock);
+               reservation_object_unlock(objs[i]->resv);
 
        ww_acquire_fini(acquire_ctx);
 }
index ec50017692d48de54885b20a21e8a43147538a8a..8478c3c9ffcd2a765e18764f97ea5d5e0a45d79e 100644 (file)
@@ -68,10 +68,10 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
        flags = TTM_PL_FLAG_TT;
 
-       ww_mutex_lock(&robj->lock, NULL);
+       reservation_object_lock(robj, NULL);
        ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
                             sg, robj, &nvbo);
-       ww_mutex_unlock(&robj->lock);
+       reservation_object_unlock(robj);
        if (ret)
                return ERR_PTR(ret);
 
index 7a2bad843f8a2e4cdd201b07b00a0ca95af36bb0..a668abcbacef5cf630f17587652e378549970777 100644 (file)
@@ -611,7 +611,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
        int steal;
        int i;
 
-       lockdep_assert_held(&bo->tbo.resv->lock.base);
+       reservation_object_assert_held(bo->tbo.resv);
 
        if (!bo->tiling_flags)
                return 0;
@@ -737,7 +737,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
 {
-       lockdep_assert_held(&bo->tbo.resv->lock.base);
+       reservation_object_assert_held(bo->tbo.resv);
 
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
@@ -749,7 +749,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
 {
        if (!force_drop)
-               lockdep_assert_held(&bo->tbo.resv->lock.base);
+               reservation_object_assert_held(bo->tbo.resv);
 
        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;
index 8ce3e8045d425f6b1c17d9449cd16d2bba09cedd..30b7bd671525ef216abf4fc2742b5c842fa3b0ef 100644 (file)
@@ -68,10 +68,10 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
        struct radeon_bo *bo;
        int ret;
 
-       ww_mutex_lock(&resv->lock, NULL);
+       reservation_object_lock(resv, NULL);
        ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
                               RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
-       ww_mutex_unlock(&resv->lock);
+       reservation_object_unlock(resv);
        if (ret)
                return ERR_PTR(ret);
 
index 58c403eda04e70631fb78e848bb982b5fad6f0c4..40d3e547c78e0d79947fa5072e640d7df6bf6eea 100644 (file)
@@ -850,8 +850,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
                                                            &busy)) {
-                               if (busy && !busy_bo &&
-                                   bo->resv->lock.ctx != ticket)
+                               if (busy && !busy_bo && ticket !=
+                                   reservation_object_locking_ctx(bo->resv))
                                        busy_bo = bo;
                                continue;
                        }
@@ -957,8 +957,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct ww_acquire_ctx *ticket;
        int ret;
 
+       ticket = reservation_object_locking_ctx(bo->resv);
        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
@@ -966,7 +968,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
-                                         bo->resv->lock.ctx);
+                                         ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@ -1963,7 +1965,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
        ret = mutex_lock_interruptible(&bo->wu_mutex);
        if (unlikely(ret != 0))
                return -ERESTARTSYS;
-       if (!ww_mutex_is_locked(&bo->resv->lock))
+       if (!reservation_object_is_locked(bo->resv))
                goto out_unlock;
        ret = reservation_object_lock_interruptible(bo->resv, NULL);
        if (ret == -EINTR)
index 957ec375a4ba54b5399c89aa5dda91ae822e9abd..723fb583fddab87eac74f622157fb94bb3971c5e 100644 (file)
@@ -144,10 +144,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
                if (ret == -EDEADLK) {
                        if (intr) {
-                               ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-                                                                      ticket);
+                               ret = reservation_object_lock_slow_interruptible(bo->resv,
+                                                                                ticket);
                        } else {
-                               ww_mutex_lock_slow(&bo->resv->lock, ticket);
+                               reservation_object_lock_slow(bo->resv, ticket);
                                ret = 0;
                        }
                }
index fc6673cde28951b6b4b26bea81e5f9c02bcde869..703786e3d5796366e48104a8725b5f3c21c2dfb7 100644 (file)
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 
        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-               lockdep_assert_held(&dst->resv->lock.base);
+               reservation_object_assert_held(dst->resv);
        if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-               lockdep_assert_held(&src->resv->lock.base);
+               reservation_object_assert_held(src->resv);
 
        if (dst->ttm->state == tt_unpopulated) {
                ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
index 5d5c2bce01f3d241dded1d40c724a55d6bee2b08..315da41a18b4c530b9f00af6bbea4141281d03a8 100644 (file)
@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;
 
-       lockdep_assert_held(&bo->resv->lock.base);
+       reservation_object_assert_held(bo->resv);
 
        if (pin) {
                if (vbo->pin_count++ > 0)
index b4f6e1217c9d309b250d0fefcec57871826ef242..71e901bbed68c58d49022e992e0cf51ea3296cd5 100644 (file)
@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
        } *cmd;
 
        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-       lockdep_assert_held(&bo->resv->lock.base);
+       reservation_object_assert_held(bo->resv);
 
        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (!cmd)
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
                return 0;
 
        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-       lockdep_assert_held(&bo->resv->lock.base);
+       reservation_object_assert_held(bo->resv);
 
        mutex_lock(&dev_priv->binding_mutex);
        if (!vcotbl->scrubbed)
index 1d38a8b2f2ec8be2839e9b297ab1f1bbf5eb9fdd..303d2c7d9ab370ba718ca58a73d78c45ff115743 100644 (file)
@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
-                       lockdep_assert_held(&res->backup->base.resv->lock.base);
+                       reservation_object_assert_held(res->backup->base.resv);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }
 
                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
-                       lockdep_assert_held(&new_backup->base.resv->lock.base);
+                       reservation_object_assert_held(new_backup->base.resv);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
                .num_shared = 0
        };
 
-       lockdep_assert_held(&vbo->base.resv->lock.base);
+       reservation_object_assert_held(vbo->base.resv);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;
index c9b8ba492f2479549938b47622bad1144fdc1221..0e6a111bed0b5d19ac4ea8a91c5f4debb96b5a7e 100644 (file)
@@ -745,10 +745,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
        WARN_ON(!kref_read(&bo->kref));
 
        if (interruptible)
-               ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-                                                      ticket);
+               ret = reservation_object_lock_slow_interruptible(bo->resv,
+                                                                ticket);
        else
-               ww_mutex_lock_slow(&bo->resv->lock, ticket);
+               reservation_object_lock_slow(bo->resv, ticket);
 
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
index 02866ee54d67e479d0ace67ebe4538b7a5de00a7..56b782fec49bf8d364ca00e36b6c5b52cf6641ca 100644 (file)
@@ -140,6 +140,38 @@ reservation_object_lock_interruptible(struct reservation_object *obj,
        return ww_mutex_lock_interruptible(&obj->lock, ctx);
 }
 
+/**
+ * reservation_object_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See reservation_object_lock() as
+ * well.
+ */
+static inline void
+reservation_object_lock_slow(struct reservation_object *obj,
+                            struct ww_acquire_ctx *ctx)
+{
+       ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * reservation_object_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptible after a die case. This function
+ * will sleep until the lock becomes available. See
+ * reservation_object_lock_interruptible() as well.
+ */
+static inline int
+reservation_object_lock_slow_interruptible(struct reservation_object *obj,
+                                          struct ww_acquire_ctx *ctx)
+{
+       return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
 
 /**
  * reservation_object_trylock - trylock the reservation object
@@ -161,6 +193,31 @@ reservation_object_trylock(struct reservation_object *obj)
        return ww_mutex_trylock(&obj->lock);
 }
 
+/**
+ * reservation_object_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool
+reservation_object_is_locked(struct reservation_object *obj)
+{
+       return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * reservation_object_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ */
+static inline struct ww_acquire_ctx *
+reservation_object_locking_ctx(struct reservation_object *obj)
+{
+       return READ_ONCE(obj->lock.ctx);
+}
+
 /**
  * reservation_object_unlock - unlock the reservation object
  * @obj: the reservation object