drm/connector: Allow max possible encoders to attach to a connector
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 93b2c5a48a7123e217e6752e83891755262b8ee7..2f11ebd95528452e1624ecbcbea65084a30730a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,7 +31,7 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
+
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
 #include "amdgpu.h"
@@ -85,9 +85,9 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 
        amdgpu_bo_kunmap(bo);
 
-       if (bo->gem_base.import_attach)
-               drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-       drm_gem_object_release(&bo->gem_base);
+       if (bo->tbo.base.import_attach)
+               drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+       drm_gem_object_release(&bo->tbo.base);
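Note: this and the following hunks come from embedding the GEM object in the TTM buffer object, so bo->gem_base becomes bo->tbo.base and bo->tbo.resv becomes bo->tbo.base.resv. Simplified sketch of the layout the change relies on (not the verbatim kernel definitions):

	struct drm_gem_object {
		struct dma_resv *resv;		/* reservation object, reached via tbo.base */
		/* ... */
	};

	struct ttm_buffer_object {
		struct drm_gem_object base;	/* GEM object embedded in the TTM BO */
		/* ... */
	};

	struct amdgpu_bo {
		struct ttm_buffer_object tbo;	/* bo->gem_base -> bo->tbo.base */
		/* ... */
	};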
        /* in case amdgpu_device_recover_vram got NULL of bo->parent */
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&adev->shadow_list_lock);
@@ -454,7 +454,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
-       drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+       drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
        bo->vm_bo = NULL;
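Note: the private init variant is used because TTM provides the backing store, so no shmem file should be allocated for the object. For reference, the signature from <drm/drm_gem.h>:

	void drm_gem_private_object_init(struct drm_device *dev,
					 struct drm_gem_object *obj,
					 size_t size);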
        bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -495,7 +495,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
        bo->tbo.bdev = &adev->mman.bdev;
-       amdgpu_bo_placement_from_domain(bo, bp->domain);
+       if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+                         AMDGPU_GEM_DOMAIN_GDS))
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+       else
+               amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
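Note: GDS, GWS and OA name on-chip resources rather than pageable memory, so such BOs are now parked in the cheap CPU domain at creation, presumably getting their real placement later when validated. Illustrative (hypothetical) call site under that assumption:

	struct amdgpu_bo_param bp = {
		.size	    = PAGE_SIZE,
		.byte_align = PAGE_SIZE,
		.domain	    = AMDGPU_GEM_DOMAIN_GDS,	/* starts out CPU-placed */
		.type	    = ttm_bo_type_kernel,
	};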
 
@@ -517,7 +521,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct dma_fence *fence;
 
-               r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+               r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;
 
@@ -540,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
        if (!bp->resv)
-               ww_mutex_unlock(&bo->tbo.resv->lock);
+               dma_resv_unlock(bo->tbo.base.resv);
        amdgpu_bo_unref(&bo);
        return r;
 }
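Note: the remaining hunks are the mechanical reservation_object -> dma_resv rename; the helpers keep their semantics. The unlock helper, for instance, is essentially the old open-coded ww_mutex unlock (sketch of the dma_resv inline):

	static inline void dma_resv_unlock(struct dma_resv *obj)
	{
		ww_mutex_unlock(&obj->lock);
	}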
@@ -561,7 +565,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                AMDGPU_GEM_CREATE_SHADOW;
        bp.type = ttm_bo_type_kernel;
-       bp.resv = bo->tbo.resv;
+       bp.resv = bo->tbo.base.resv;
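Note: the shadow BO reuses the parent's reservation object, so locking or fencing the parent covers the shadow as well. Sketch of that consequence (assuming a valid parent bo):

	dma_resv_lock(bo->tbo.base.resv, NULL);		/* covers bo and bo->shadow */
	/* ... back up / restore VRAM contents via the shadow ... */
	dma_resv_unlock(bo->tbo.base.resv);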
 
        r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
        if (!r) {
@@ -602,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
                if (!bp->resv)
-                       WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+                       WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
                                                        NULL));
 
                r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
                if (!bp->resv)
-                       reservation_object_unlock((*bo_ptr)->tbo.resv);
+                       dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
                if (r)
                        amdgpu_bo_unref(bo_ptr);
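Note: with a NULL ww_acquire_ctx the lock cannot fail with -EDEADLK, so the WARN_ON() merely documents that the call is expected to return 0. Sketch of the renamed helper:

	static inline int dma_resv_lock(struct dma_resv *obj,
					struct ww_acquire_ctx *ctx)
	{
		return ww_mutex_lock(&obj->lock, ctx);
	}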
@@ -705,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
                return 0;
        }
 
-       r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
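Note: the kmap path waits only on the exclusive (write) fence, uninterruptibly and with no real timeout. For reference, the renamed helper's signature:

	long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
				       bool wait_all,	/* false: exclusive fence only */
				       bool intr,	/* false: uninterruptible */
				       unsigned long timeout);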
@@ -975,6 +979,7 @@ static const char *amdgpu_vram_names[] = {
        "HBM",
        "DDR3",
        "DDR4",
+       "GDDR6",
 };
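Note: the array is indexed by the firmware-reported VRAM type, so "GDDR6" must sit at the index of its type constant. Illustrative lookup (assuming adev->gmc.vram_type holds that type):

	DRM_INFO("VRAM: %s\n", amdgpu_vram_names[adev->gmc.vram_type]);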
 
 /**
@@ -1082,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-       lockdep_assert_held(&bo->tbo.resv->lock.base);
+       dma_resv_assert_held(bo->tbo.base.resv);
 
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
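Note: the new assert hides the dma_resv lock layout instead of poking at resv->lock.base directly; it remains a lockdep-only check. Essentially:

	#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)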
@@ -1278,12 +1283,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
 {
-       struct reservation_object *resv = bo->tbo.resv;
+       struct dma_resv *resv = bo->tbo.base.resv;
 
        if (shared)
-               reservation_object_add_shared_fence(resv, fence);
+               dma_resv_add_shared_fence(resv, fence);
        else
-               reservation_object_add_excl_fence(resv, fence);
+               dma_resv_add_excl_fence(resv, fence);
 }
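Note: dma_resv keeps the old model of one exclusive fence plus many shared fences, so the wrapper's shared flag decides who must wait. Illustrative use, given a fence from a submitted job:

	amdgpu_bo_fence(bo, fence, true);	/* read: other readers may overlap */
	amdgpu_bo_fence(bo, fence, false);	/* write: all later users must wait */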
 
 /**
@@ -1303,7 +1308,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
        int r;
 
        amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+       amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
        r = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
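Note: the pattern above collects every fence on the BO's reservation object into a temporary sync object and waits for all of them. Illustrative (hypothetical) caller:

	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, true);
	if (r)
		return r;	/* e.g. -ERESTARTSYS if interrupted */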
 
@@ -1323,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+       WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
                     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
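Note: sketch of the helper used in the check above; it simply tests the embedded ww_mutex:

	static inline bool dma_resv_is_locked(struct dma_resv *obj)
	{
		return ww_mutex_is_locked(&obj->lock);
	}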
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&