drm/ttm: use rcu in core ttm
author: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Wed, 14 May 2014 13:42:29 +0000 (15:42 +0200)
committer: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Tue, 2 Sep 2014 15:28:47 +0000 (17:28 +0200)
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
drivers/gpu/drm/ttm/ttm_bo.c

index 66707be386f74ce59cc5ef13a44d9a8058b71293..a11969acfea5363fc1ce747338edd03b40b24cba 100644 (file)
@@ -467,66 +467,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 
-static int ttm_bo_unreserve_and_wait(struct ttm_buffer_object *bo,
-                                    bool interruptible)
-{
-       struct ttm_bo_global *glob = bo->glob;
-       struct reservation_object_list *fobj;
-       struct fence *excl = NULL;
-       struct fence **shared = NULL;
-       u32 shared_count = 0, i;
-       int ret = 0;
-
-       fobj = reservation_object_get_list(bo->resv);
-       if (fobj && fobj->shared_count) {
-               shared = kmalloc(sizeof(*shared) * fobj->shared_count,
-                                GFP_KERNEL);
-
-               if (!shared) {
-                       ret = -ENOMEM;
-                       __ttm_bo_unreserve(bo);
-                       spin_unlock(&glob->lru_lock);
-                       return ret;
-               }
-
-               for (i = 0; i < fobj->shared_count; ++i) {
-                       if (!fence_is_signaled(fobj->shared[i])) {
-                               fence_get(fobj->shared[i]);
-                               shared[shared_count++] = fobj->shared[i];
-                       }
-               }
-               if (!shared_count) {
-                       kfree(shared);
-                       shared = NULL;
-               }
-       }
-
-       excl = reservation_object_get_excl(bo->resv);
-       if (excl && !fence_is_signaled(excl))
-               fence_get(excl);
-       else
-               excl = NULL;
-
-       __ttm_bo_unreserve(bo);
-       spin_unlock(&glob->lru_lock);
-
-       if (excl) {
-               ret = fence_wait(excl, interruptible);
-               fence_put(excl);
-       }
-
-       if (shared_count > 0) {
-               for (i = 0; i < shared_count; ++i) {
-                       if (!ret)
-                               ret = fence_wait(shared[i], interruptible);
-                       fence_put(shared[i]);
-               }
-               kfree(shared);
-       }
-
-       return ret;
-}
-
 /**
  * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
@@ -550,9 +490,19 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
        ret = ttm_bo_wait(bo, false, false, true);
 
        if (ret && !no_wait_gpu) {
-               ret = ttm_bo_unreserve_and_wait(bo, interruptible);
-               if (ret)
-                       return ret;
+               long lret;
+               ww_mutex_unlock(&bo->resv->lock);
+               spin_unlock(&glob->lru_lock);
+
+               lret = reservation_object_wait_timeout_rcu(bo->resv,
+                                                          true,
+                                                          interruptible,
+                                                          30 * HZ);
+
+               if (lret < 0)
+                       return lret;
+               else if (lret == 0)
+                       return -EBUSY;
 
                spin_lock(&glob->lru_lock);
                ret = __ttm_bo_reserve(bo, false, true, false, NULL);