drm/ttm: add ttm_bo_reserve_slowpath
author Maarten Lankhorst <maarten.lankhorst@canonical.com>
Tue, 15 Jan 2013 13:57:05 +0000 (14:57 +0100)
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>
Tue, 15 Jan 2013 13:57:05 +0000 (14:57 +0100)
Instead of dropping everything, waiting for the bo to be unreserved
and trying again, a better strategy is to do a blocking wait.

This maps much more naturally to a mutex_lock-like call.
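
As a sketch of the intended use (the entry structure and the backoff
helper below are illustrative only, not part of TTM): a driver that
hits -EAGAIN while reserving a list of buffers backs all of its
reservations off, then does a blocking wait on the contended buffer
before retrying:

	struct my_entry {
		struct list_head head;
		struct ttm_buffer_object *bo;
		bool reserved;
	};

	static int my_reserve_list(struct list_head *list, uint32_t seq)
	{
		struct my_entry *entry;
		int ret;

	retry:
		list_for_each_entry(entry, list, head) {
			/* kept reserved by an earlier slowpath pass */
			if (entry->reserved)
				continue;

			ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
			if (ret == -EAGAIN) {
				/* would deadlock: back off every reservation
				 * we hold (clearing ->reserved)... */
				my_backoff(list);
				/* ...then block until the contended bo is ours */
				ret = ttm_bo_reserve_slowpath(entry->bo, true, seq);
				if (ret)
					return ret;
				entry->reserved = true;
				goto retry;
			} else if (ret) {
				my_backoff(list);
				return ret;
			}
			entry->reserved = true;
		}
		return 0;
	}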

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
drivers/gpu/drm/ttm/ttm_bo.c
include/drm/ttm/ttm_bo_driver.h

index e8e4814b12957b26bf5e08f3ad51814d3c192fa7..4dd6f9e77a7d213f98fa2ef0569270e0baf19b50 100644 (file)
@@ -310,6 +310,59 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
        return ret;
 }
 
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                 bool interruptible, uint32_t sequence)
+{
+       bool wake_up = false;
+       int ret;
+
+       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+               WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+               ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+               if (unlikely(ret))
+                       return ret;
+       }
+
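+       /*
+        * Wrap-around safe comparison: true when @sequence does not
+        * advance past bo->val_seq, i.e. we are moving the sequence
+        * number backwards (or leaving it unchanged).
+        */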
+       if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+               wake_up = true;
+
+       /*
+        * Wake up waiters that may need to recheck for deadlock,
+        * if we decreased the sequence number or it was not valid before.
+        */
+       bo->val_seq = sequence;
+       bo->seq_valid = true;
+       if (wake_up)
+               wake_up_all(&bo->event_queue);
+
+       return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                           bool interruptible, uint32_t sequence)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       int put_count, ret;
+
+       ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+       if (likely(!ret)) {
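+               /* A reserved buffer also leaves the LRU, as in ttm_bo_reserve. */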
+               spin_lock(&glob->lru_lock);
+               put_count = ttm_bo_del_from_lru(bo);
+               spin_unlock(&glob->lru_lock);
+               ttm_bo_list_ref_sub(bo, put_count, true);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
        ttm_bo_add_to_lru(bo);
index 6fff43222e20d76f8794d6f8a1bfe00af872b36a..5af71af6bf88a1ba3ca5bba159fedc0a21d087bf 100644 (file)
@@ -821,6 +821,35 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->val_seq to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because no other reservations are
+ * held by us, this function can no longer deadlock.
+ *
+ * Will not remove reserved buffers from the LRU lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                        bool interruptible,
+                                        uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->val_seq to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because no other reservations are
+ * held by us, this function can no longer deadlock.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                                  bool interruptible, uint32_t sequence);
 
 /**
  * ttm_bo_reserve_nolru: