Merge tag 'drm-for-v4.15-part2' of git://people.freedesktop.org/~airlied/linux

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6149a47fe63d5edbd4f2561bd5e500f3a4078746..e87eedcc0da9d5363d7742782281683d6bb842dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 }
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
-                               int alignment, u32 initial_domain,
-                               u64 flags, bool kernel,
-                               struct drm_gem_object **obj)
+                            int alignment, u32 initial_domain,
+                            u64 flags, bool kernel,
+                            struct reservation_object *resv,
+                            struct drm_gem_object **obj)
 {
-       struct amdgpu_bo *robj;
+       struct amdgpu_bo *bo;
        int r;
 
        *obj = NULL;
@@ -59,9 +60,14 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 
 retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-                            flags, NULL, NULL, 0, &robj);
+                            flags, NULL, resv, 0, &bo);
        if (r) {
                if (r != -ERESTARTSYS) {
+                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                               goto retry;
+                       }
+
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
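
The retry cascade added above relaxes a failed allocation in two steps before giving up: it first drops AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (so the BO no longer has to live in the CPU-visible part of VRAM) and only then lets a pure-VRAM request fall back to VRAM|GTT. A compressed, purely illustrative sketch of that order (the helper name is made up; this is not a refactoring of the code above):

    /* Illustrative only: how the request is relaxed between retries. */
    static u64 example_relax_request(u64 flags, u32 *domain)
    {
            if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                    /* 1st retry: stop forcing CPU-visible VRAM */
                    return flags & ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

            if (*domain == AMDGPU_GEM_DOMAIN_VRAM)
                    /* 2nd retry: let the buffer spill over into GTT */
                    *domain |= AMDGPU_GEM_DOMAIN_GTT;

            return flags;
    }
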
@@ -71,7 +77,7 @@ retry:
                }
                return r;
        }
-       *obj = &robj->gem_base;
+       *obj = &bo->gem_base;
 
        return 0;
 }
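
With the reworked signature, callers now hand in the reservation object explicitly: NULL keeps the old behaviour of a private per-BO lock, while passing an existing one (the VM root PD's reservation, as the create ioctl does further down) makes the new BO share that lock. A minimal caller sketch, assuming the usual amdgpu.h context (the helper name is hypothetical):

    static int example_create_gtt_bo(struct amdgpu_device *adev,
                                     unsigned long size,
                                     struct drm_gem_object **gobj)
    {
            /* NULL resv: the BO gets its own reservation object, as before */
            return amdgpu_gem_object_create(adev, size, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            AMDGPU_GEM_CREATE_CPU_GTT_USWC,
                                            false /* kernel */,
                                            NULL /* resv */, gobj);
    }
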
@@ -112,7 +118,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
+       struct mm_struct *mm;
        int r;
+
+       mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+       if (mm && mm != current->mm)
+               return -EPERM;
+
+       if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+           abo->tbo.resv != vm->root.base.bo->tbo.resv)
+               return -EPERM;
+
        r = amdgpu_bo_reserve(abo, false);
        if (r)
                return r;
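
The two new checks make amdgpu_gem_object_open() refuse handles that would cross an ownership boundary: userptr BOs can only be opened by the process whose address space they pin, and per-VM BOs (those sharing the root PD's reservation object) can only be opened into the VM they were created for. From userspace this surfaces as EPERM on the generic GEM open path; a rough sketch of the affected call (header locations vary with the build environment):

    #include <stdint.h>
    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>            /* struct drm_gem_open, DRM_IOCTL_GEM_OPEN */

    /* Opening a flink name that refers to another process' userptr BO, or to
     * a per-VM BO belonging to a different VM, now fails with EPERM. */
    static int example_open_by_name(int drm_fd, uint32_t name, uint32_t *handle)
    {
            struct drm_gem_open open = { .name = name };

            if (ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &open))
                    return -errno;

            *handle = open.handle;
            return 0;
    }
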
@@ -127,35 +143,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
        return 0;
 }
 
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
-       /* if anything is swapped out don't swap it in here,
-          just abort and wait for the next CS */
-       if (!amdgpu_bo_gpu_accessible(bo))
-               return -ERESTARTSYS;
-
-       if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-               return -ERESTARTSYS;
-
-       return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
-                               struct amdgpu_vm *vm,
-                               struct list_head *list)
-{
-       struct ttm_validate_buffer *entry;
-
-       list_for_each_entry(entry, list, head) {
-               struct amdgpu_bo *bo =
-                       container_of(entry->bo, struct amdgpu_bo, tbo);
-               if (amdgpu_gem_vm_check(NULL, bo))
-                       return false;
-       }
-
-       return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
 {
@@ -165,13 +152,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        struct amdgpu_vm *vm = &fpriv->vm;
 
        struct amdgpu_bo_list_entry vm_pd;
-       struct list_head list;
+       struct list_head list, duplicates;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;
 
        INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
        tv.shared = true;
@@ -179,7 +167,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
@@ -189,7 +177,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        if (bo_va && --bo_va->ref_count == 0) {
                amdgpu_vm_bo_rmv(adev, bo_va);
 
-               if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+               if (amdgpu_vm_ready(vm)) {
                        struct dma_fence *fence = NULL;
 
                        r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +202,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
+       uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
+       struct reservation_object *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle;
-       bool kernel = false;
        int r;
 
        /* reject invalid gem flags */
-       if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-                                     AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-                                     AMDGPU_GEM_CREATE_VRAM_CLEARED))
+       if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+                     AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+                     AMDGPU_GEM_CREATE_VRAM_CLEARED |
+                     AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+
                return -EINVAL;
 
        /* reject invalid gem domains */
@@ -240,7 +234,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
-               kernel = true;
+               flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +246,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        }
        size = roundup(size, PAGE_SIZE);
 
+       if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+               r = amdgpu_bo_reserve(vm->root.base.bo, false);
+               if (r)
+                       return r;
+
+               resv = vm->root.base.bo->tbo.resv;
+       }
+
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
-                                    args->in.domain_flags,
-                                    kernel, &gobj);
+                                    flags, false, resv, &gobj);
+       if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+               if (!r) {
+                       struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+                       abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+               }
+               amdgpu_bo_unreserve(vm->root.base.bo);
+       }
        if (r)
                return r;
 
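
This is the kernel side of the new per-VM-BO interface: with AMDGPU_GEM_CREATE_VM_ALWAYS_VALID the BO is created against the VM root page directory's reservation object and remains tied to that VM (amdgpu_gem_object_open() above rejects any other VM with -EPERM). A userspace-side sketch of requesting such a BO; header path, alignment and error handling are illustrative assumptions, not part of this patch:

    #include <stdint.h>
    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>     /* or libdrm's copy of amdgpu_drm.h */

    static int example_create_per_vm_bo(int drm_fd, uint64_t size,
                                        uint32_t *handle)
    {
            union drm_amdgpu_gem_create args = {0};

            args.in.bo_size = size;
            args.in.alignment = 4096;
            args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
            /* always valid in this process' VM; cannot be shared with others */
            args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

            if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
                    return -errno;

            *handle = args.out.handle;
            return 0;
    }
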
@@ -297,9 +306,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        }
 
        /* create a gem object to contain this object in */
-       r = amdgpu_gem_object_create(adev, args->size, 0,
-                                    AMDGPU_GEM_DOMAIN_CPU, 0,
-                                    0, &gobj);
+       r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+                                    0, 0, NULL, &gobj);
        if (r)
                return r;
 
@@ -317,12 +325,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        }
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-               down_read(&current->mm->mmap_sem);
-
                r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                 bo->tbo.ttm->pages);
                if (r)
-                       goto unlock_mmap_sem;
+                       goto release_object;
 
                r = amdgpu_bo_reserve(bo, true);
                if (r)
@@ -333,8 +339,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto free_pages;
-
-               up_read(&current->mm->mmap_sem);
        }
 
        r = drm_gem_handle_create(filp, gobj, &handle);
@@ -347,10 +351,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        return 0;
 
 free_pages:
-       release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
-
-unlock_mmap_sem:
-       up_read(&current->mm->mmap_sem);
+       release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
 
 release_object:
        drm_gem_object_put_unlocked(gobj);
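
Two separate core changes meet in this hunk: the mmap_sem handling is now taken inside amdgpu_ttm_tt_get_user_pages(), so the ioctl no longer brackets the call with down_read()/up_read() itself, and release_pages() lost its trailing "cold" parameter in this kernel cycle, hence the two-argument call in the error path. For reference, the prototype the error path now compiles against:

    /* release_pages() in this release; the old third argument (bool cold)
     * is gone, callers simply drop it */
    void release_pages(struct page **pages, int nr);
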
@@ -511,10 +512,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct list_head *list,
                                    uint32_t operation)
 {
-       int r = -ERESTARTSYS;
+       int r;
 
-       if (!amdgpu_gem_vm_ready(adev, vm, list))
-               goto error;
+       if (!amdgpu_vm_ready(vm))
+               return;
 
        r = amdgpu_vm_update_directories(adev, vm);
        if (r)
@@ -551,15 +552,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_bo_list_entry vm_pd;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
-       struct list_head list;
+       struct list_head list, duplicates;
        uint64_t va_flags;
        int r = 0;
 
        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
-                       "va_address 0x%lX is in reserved area 0x%X\n",
-                       (unsigned long)args->va_address,
-                       AMDGPU_VA_RESERVED_SIZE);
+                       "va_address 0x%LX is in reserved area 0x%LX\n",
+                       args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }
 
@@ -580,13 +580,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        args->operation);
                return -EINVAL;
        }
-       if ((args->operation == AMDGPU_VA_OP_MAP) ||
-           (args->operation == AMDGPU_VA_OP_REPLACE)) {
-               if (amdgpu_kms_vram_lost(adev, fpriv))
-                       return -ENODEV;
-       }
 
        INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&duplicates);
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_unref;
 
@@ -669,6 +665,7 @@ error_unref:
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
 {
+       struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
@@ -716,6 +713,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
+               if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+                       amdgpu_vm_bo_invalidate(adev, robj, true);
+
                amdgpu_bo_unreserve(robj);
                break;
        default:
@@ -745,8 +745,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                                    ttm_bo_type_device,
-                                    &gobj);
+                                    false, NULL, &gobj);
        if (r)
                return -ENOMEM;