Merge tag 'amd-drm-next-5.19-2022-05-18' of https://gitlab.freedesktop.org/agd5f...
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9596c22fded6c023e384eedaac291ebb8c975763..2ceeaa4c793aec1243386b81c6fd93b08058ec8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -289,7 +289,7 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 
        dma_resv_assert_held(vm->root.bo->tbo.base.resv);
 
-       vm->bulk_moveable = false;
+       ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
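
For context: ttm_bo_set_bulk_move() is the new TTM helper that replaces amdgpu's hand-rolled bulk_moveable flag. The driver pairs a BO with a driver-owned struct ttm_lru_bulk_move once, TTM keeps the bulk range valid across every subsequent LRU operation, and passing NULL later detaches the BO again (see the amdgpu_vm_bo_del() hunk further down). A minimal sketch of the attach side; vm_attach_bulk_move() is a hypothetical wrapper, everything else follows this file:

    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_bo_api.h>

    /* Attach @bo to @vm's bulk move so TTM moves it in bulk from
     * now on. The BO's reservation lock must be held, which the
     * surrounding amdgpu_vm_bo_base_init() asserts a few lines up.
     */
    static void vm_attach_bulk_move(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
    {
            dma_resv_assert_held(bo->tbo.base.resv);
            ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
    }
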
@@ -329,36 +329,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
        list_add(&entry->tv.head, validated);
 }
 
-/**
- * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
- *
- * @bo: BO which was removed from the LRU
- *
- * Make sure the bulk_moveable flag is updated when a BO is removed from the
- * LRU.
- */
-void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
-{
-       struct amdgpu_bo *abo;
-       struct amdgpu_vm_bo_base *bo_base;
-
-       if (!amdgpu_bo_is_amdgpu_bo(bo))
-               return;
-
-       if (bo->pin_count)
-               return;
-
-       abo = ttm_to_amdgpu_bo(bo);
-       if (!abo->parent)
-               return;
-       for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
-               struct amdgpu_vm *vm = bo_base->vm;
-
-               if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
-                       vm->bulk_moveable = false;
-       }
-
-}
 /**
  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
  *
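
With TTM tracking bulk-move membership itself, the notify hook deleted above has nothing left to do: all it ever did was clear bulk_moveable, and that flag is gone. What the driver still owes TTM is a one-time initialization of the bulk-move structure per VM; a sketch, assuming the lru_bulk_move member used throughout this file (in this kernel the helper just zeroes the position array):

    #include <drm/ttm/ttm_resource.h>

    /* One-time setup, e.g. during VM creation, before any BO is
     * attached with ttm_bo_set_bulk_move().
     */
    static void vm_bulk_move_setup(struct amdgpu_vm *vm)
    {
            ttm_lru_bulk_move_init(&vm->lru_bulk_move);
    }
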
@@ -371,35 +341,9 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
 {
-       struct amdgpu_vm_bo_base *bo_base;
-
-       if (vm->bulk_moveable) {
-               spin_lock(&adev->mman.bdev.lru_lock);
-               ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-               spin_unlock(&adev->mman.bdev.lru_lock);
-               return;
-       }
-
-       memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
-
        spin_lock(&adev->mman.bdev.lru_lock);
-       list_for_each_entry(bo_base, &vm->idle, vm_status) {
-               struct amdgpu_bo *bo = bo_base->bo;
-               struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
-
-               if (!bo->parent)
-                       continue;
-
-               ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
-                                       &vm->lru_bulk_move);
-               if (shadow)
-                       ttm_bo_move_to_lru_tail(&shadow->tbo,
-                                               shadow->tbo.resource,
-                                               &vm->lru_bulk_move);
-       }
+       ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&adev->mman.bdev.lru_lock);
-
-       vm->bulk_moveable = true;
 }
 
 /**
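
amdgpu_vm_move_to_lru_tail() collapses to one helper call because ttm_lru_bulk_move_tail() already records the first and last resource of every bulk range. Roughly, the helper walks a fixed-size position array rather than the BOs themselves, which makes the move O(memory types * priorities) instead of O(BOs). A simplified sketch of that shape, not the exact TTM implementation:

    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_resource.h>

    /* Simplified: splice each non-empty bulk range [first, last]
     * to the tail of the matching manager's LRU list. The caller
     * holds bdev->lru_lock, as the hunk above does.
     */
    static void bulk_move_tail_sketch(struct ttm_lru_bulk_move *bulk)
    {
            unsigned int i, j;

            for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
                    for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                            struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
                            struct ttm_resource_manager *man;

                            if (!pos->first)
                                    continue;

                            man = ttm_manager_type(pos->first->bo->bdev, i);
                            list_bulk_move_tail(&man->lru[j],
                                                &pos->first->lru,
                                                &pos->last->lru);
                    }
            }
    }
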
@@ -422,8 +366,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r;
 
-       vm->bulk_moveable &= list_empty(&vm->evicted);
-
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
                struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
@@ -1248,7 +1190,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
 
-       dma_resv_for_each_fence(&cursor, resv, true, fence) {
+       dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
                /* Add a callback for each fence in the reservation object */
                amdgpu_vm_prt_get(adev);
                amdgpu_vm_add_prt_cb(adev, fence);
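
The bool all_fences argument of the dma_resv iterators and queries is replaced by enum dma_resv_usage in this cycle. The enum is ordered (KERNEL, WRITE, READ, BOOKKEEP), and asking for a usage returns fences of that class plus all stricter ones, so DMA_RESV_USAGE_BOOKKEEP means "every fence" and matches the old true here. A minimal locked-iteration sketch; count_all_fences() is a hypothetical helper:

    #include <linux/dma-resv.h>

    /* Count every fence on @resv, bookkeeping fences included.
     * dma_resv_for_each_fence() requires @resv to be locked; the
     * dma_resv_for_each_fence_unlocked() variant covers the rest.
     */
    static unsigned int count_all_fences(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;
            unsigned int count = 0;

            dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence)
                    ++count;

            return count;
    }
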
@@ -1800,7 +1742,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
        if (bo) {
                dma_resv_assert_held(bo->tbo.base.resv);
                if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
-                       vm->bulk_moveable = false;
+                       ttm_bo_set_bulk_move(&bo->tbo, NULL);
 
                for (base = &bo_va->base.bo->vm_bo; *base;
                     base = &(*base)->next) {
@@ -1854,7 +1796,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return true;
 
        /* Don't evict VM page tables while they are busy */
-       if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
+       if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
                return false;
 
        /* Try to block ongoing updates */
@@ -2034,7 +1976,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-       timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
+       timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+                                       DMA_RESV_USAGE_BOOKKEEP,
                                        true, timeout);
        if (timeout <= 0)
                return timeout;
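
The one-shot queries take the same enum: the dma_resv_test_signaled() conversion above asks whether everything up to the given usage class has signaled, and dma_resv_wait_timeout() here waits on the same fence set, its remaining bool being the interruptible flag. A sketch of the usual check-then-wait pattern; wait_for_idle() is a hypothetical helper using these signatures:

    #include <linux/dma-resv.h>
    #include <linux/jiffies.h>

    /* Returns <0 on error, 0 if the wait timed out, otherwise the
     * remaining timeout in jiffies (1 if already idle).
     */
    static long wait_for_idle(struct dma_resv *resv)
    {
            if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
                    return 1; /* nothing to wait for */

            return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                         true, msecs_to_jiffies(100));
    }
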
@@ -2116,7 +2059,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        if (r)
                goto error_free_root;
 
-       r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
+       r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
        if (r)
                goto error_unreserve;
 
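
dma_resv_reserve_shared() becomes dma_resv_reserve_fences() in the same series, since with usage-tagged fences there is no dedicated "shared" slot array left to size. The contract is unchanged: reserve slots under the reservation lock before adding, because dma_resv_add_fence() must not fail. A sketch of that pairing; add_fence_checked() is a hypothetical helper, and the caller holds both the lock and a fence reference:

    #include <linux/dma-resv.h>

    /* Reserve space first: dma_resv_add_fence() itself cannot
     * allocate memory and has no error path.
     */
    static int add_fence_checked(struct dma_resv *resv,
                                 struct dma_fence *fence)
    {
            int r;

            dma_resv_assert_held(resv);

            r = dma_resv_reserve_fences(resv, 1);
            if (r)
                    return r;

            dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
            return 0;
    }
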
@@ -2538,7 +2481,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                value = 0;
        }
 
-       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve fence slot\n", r);
                goto error_unlock;