drm/amdgpu: add new device to use atpx quirk
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c187e9c7f45bfc4c30171760647eea..df65c66dc956f7500810b5e3e42080650a484193 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -35,6 +35,7 @@
 struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct dma_fence        *fence;
+       bool    explicit;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -63,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
 {
-       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+       struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
        if (s_fence) {
                struct amdgpu_ring *ring;
@@ -84,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
  */
 static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
-       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+       struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
        if (s_fence)
                return s_fence->owner;
@@ -119,7 +120,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
 {
        struct amdgpu_sync_entry *e;
 
@@ -128,6 +129,10 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
                        continue;
 
                amdgpu_sync_keep_later(&e->fence, f);
+
+               /* Preserve explicit flag to not lose pipeline sync */
+               e->explicit |= explicit;
+
                return true;
        }
        return false;
@@ -141,24 +146,25 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
  *
  */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-                     struct dma_fence *f)
+                     struct dma_fence *f, bool explicit)
 {
        struct amdgpu_sync_entry *e;
 
        if (!f)
                return 0;
-
        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
-       if (amdgpu_sync_add_later(sync, f))
+       if (amdgpu_sync_add_later(sync, f, explicit))
                return 0;
 
        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;
 
+       e->explicit = explicit;
+
        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
@@ -189,10 +195,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
-       r = amdgpu_sync_fence(adev, sync, f);
-
-       if (explicit_sync)
-               return r;
+       r = amdgpu_sync_fence(adev, sync, f, false);
 
        flist = reservation_object_get_list(resv);
        if (!flist || r)
@@ -212,15 +215,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;
 
-                       /* Ignore fence from the same owner as
+                       /* Ignore fences from the same owner as well as explicit ones, as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                           fence_owner == owner)
+                           (fence_owner == owner || explicit_sync))
                                continue;
                }
 
-               r = amdgpu_sync_fence(adev, sync, f);
+               r = amdgpu_sync_fence(adev, sync, f, false);
                if (r)
                        break;
        }
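
Note that explicit_sync now only skips the implicit shared fences of foreign owners; the exclusive fence is still synced to unconditionally. A hedged sketch of a caller follows, assuming a per-BO query such as amdgpu_bo_explicit_sync(), which is not part of this patch and is only assumed here:

/* Sketch: gather reservation fences for a BO during command submission.
 * amdgpu_bo_explicit_sync() is an assumed helper reporting whether the
 * BO opted out of implicit synchronization.
 */
static int amdgpu_cs_sync_bo_resv(struct amdgpu_device *adev,
				  struct amdgpu_sync *sync,
				  struct amdgpu_bo *bo, void *owner)
{
	return amdgpu_sync_resv(adev, sync, bo->tbo.resv, owner,
				amdgpu_bo_explicit_sync(bo));
}
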
@@ -245,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;
-               struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
@@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  * amdgpu_sync_get_fence - get the next fence from the sync object
  *
  * @sync: sync object to use
+ * @explicit: true if the next fence is explicit
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
 {
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;
-
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
 
                f = e->fence;
+               if (explicit)
+                       *explicit = e->explicit;
 
                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);
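
The new out-parameter tells the consumer whether the returned dependency was an explicit one. A rough sketch of how a job dependency path might use it to keep the pipeline sync that the explicit flag exists to preserve; need_pipeline_sync is treated as a hypothetical job field here:

/* Illustrative consumer of the out-parameter: if the next unsignaled
 * fence was added explicitly, flag the job so a pipeline sync is still
 * emitted instead of being optimized away.
 */
static struct dma_fence *example_job_dependency(struct amdgpu_job *job)
{
	bool explicit = false;
	struct dma_fence *fence;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit)
		job->need_pipeline_sync = true;	/* hypothetical flag */

	return fence;
}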