drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/list.h>
26 #include <linux/pagemap.h>
27 #include <linux/sched/mm.h>
28 #include <linux/dma-buf.h>
29 #include <drm/drmP.h>
30 #include "amdgpu_object.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_amdkfd.h"
33
34 /* Special VM and GART address alignment needed for VI pre-Fiji due to
35  * a HW bug.
36  */
37 #define VI_BO_SIZE_ALIGN (0x8000)
38
39 /* BO flag to indicate a KFD userptr BO */
40 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
41
42 /* Userptr restore delay, just long enough to allow consecutive VM
43  * changes to accumulate
44  */
45 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
46
47 /* Impose limit on how much memory KFD can use */
48 static struct {
49         uint64_t max_system_mem_limit;
50         uint64_t max_ttm_mem_limit;
51         int64_t system_mem_used;
52         int64_t ttm_mem_used;
53         spinlock_t mem_limit_lock;
54 } kfd_mem_limit;
55
56 /* Struct used for amdgpu_amdkfd_bo_validate */
57 struct amdgpu_vm_parser {
58         uint32_t        domain;
59         bool            wait;
60 };
61
62 static const char * const domain_bit_to_string[] = {
63                 "CPU",
64                 "GTT",
65                 "VRAM",
66                 "GDS",
67                 "GWS",
68                 "OA"
69 };
70
71 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
72
73 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
74
75
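/* KFD identifies the device by an opaque kgd_dev handle, which is really a
 * pointer to the amdgpu_device.
 */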
76 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
77 {
78         return (struct amdgpu_device *)kgd;
79 }
80
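/* Return true if the BO still needs to be added to the given VM (no bo_va
 * entry exists for it yet), false otherwise.
 */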
81 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
82                 struct kgd_mem *mem)
83 {
84         struct kfd_bo_va_list *entry;
85
86         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
87                 if (entry->bo_va->base.vm == avm)
88                         return false;
89
90         return true;
91 }
92
93 /* Set memory usage limits. Currently, the limits are:
94  *  System (TTM + userptr) memory - 3/4 of system RAM
95  *  TTM memory - 3/8 of system RAM
96  */
97 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
98 {
99         struct sysinfo si;
100         uint64_t mem;
101
102         si_meminfo(&si);
103         mem = si.totalram - si.totalhigh;
104         mem *= si.mem_unit;
105
106         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
107         kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
108         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
109         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
110                 (kfd_mem_limit.max_system_mem_limit >> 20),
111                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
112 }
113
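/* Charge an allocation of @size bytes in @domain against the global system
 * and TTM memory limits and against the GPU's VRAM limit (which keeps some
 * VRAM reserved for page tables). Returns -ENOMEM if a limit would be
 * exceeded.
 */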
114 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
115                 uint64_t size, u32 domain, bool sg)
116 {
117         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
118         uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
119         int ret = 0;
120
121         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
122                                        sizeof(struct amdgpu_bo));
123
124         vram_needed = 0;
125         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
126                 /* TTM GTT memory */
127                 system_mem_needed = acc_size + size;
128                 ttm_mem_needed = acc_size + size;
129         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
130                 /* Userptr */
131                 system_mem_needed = acc_size + size;
132                 ttm_mem_needed = acc_size;
133         } else {
134                 /* VRAM and SG */
135                 system_mem_needed = acc_size;
136                 ttm_mem_needed = acc_size;
137                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
138                         vram_needed = size;
139         }
140
141         spin_lock(&kfd_mem_limit.mem_limit_lock);
142
143         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
144              kfd_mem_limit.max_system_mem_limit) ||
145             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
146              kfd_mem_limit.max_ttm_mem_limit) ||
147             (adev->kfd.vram_used + vram_needed >
148              adev->gmc.real_vram_size - reserved_for_pt)) {
149                 ret = -ENOMEM;
150         } else {
151                 kfd_mem_limit.system_mem_used += system_mem_needed;
152                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
153                 adev->kfd.vram_used += vram_needed;
154         }
155
156         spin_unlock(&kfd_mem_limit.mem_limit_lock);
157         return ret;
158 }
159
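/* Undo the accounting done in amdgpu_amdkfd_reserve_mem_limit for a BO of
 * the given size and allocation domain.
 */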
160 static void unreserve_mem_limit(struct amdgpu_device *adev,
161                 uint64_t size, u32 domain, bool sg)
162 {
163         size_t acc_size;
164
165         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
166                                        sizeof(struct amdgpu_bo));
167
168         spin_lock(&kfd_mem_limit.mem_limit_lock);
169         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
170                 kfd_mem_limit.system_mem_used -= (acc_size + size);
171                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
172         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
173                 kfd_mem_limit.system_mem_used -= (acc_size + size);
174                 kfd_mem_limit.ttm_mem_used -= acc_size;
175         } else {
176                 kfd_mem_limit.system_mem_used -= acc_size;
177                 kfd_mem_limit.ttm_mem_used -= acc_size;
178                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
179                         adev->kfd.vram_used -= size;
180                         WARN_ONCE(adev->kfd.vram_used < 0,
181                                   "kfd VRAM memory accounting unbalanced");
182                 }
183         }
184         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
185                   "kfd system memory accounting unbalanced");
186         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
187                   "kfd TTM memory accounting unbalanced");
188
189         spin_unlock(&kfd_mem_limit.mem_limit_lock);
190 }
191
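/* Undo the memory-limit accounting for @bo based on its preferred domain;
 * userptr BOs are accounted as CPU-domain (system) memory.
 */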
192 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
193 {
194         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
195         u32 domain = bo->preferred_domains;
196         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
197
198         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
199                 domain = AMDGPU_GEM_DOMAIN_CPU;
200                 sg = false;
201         }
202
203         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
204 }
205
206
207 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
208  *  reservation object.
209  *
210  * @bo: [IN] Remove eviction fence(s) from this BO
211  * @ef: [IN] This eviction fence is removed if it
212  *  is present in the shared list.
213  *
214  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
215  */
216 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
217                                         struct amdgpu_amdkfd_fence *ef)
218 {
219         struct reservation_object *resv = bo->tbo.resv;
220         struct reservation_object_list *old, *new;
221         unsigned int i, j, k;
222
223         if (!ef)
224                 return -EINVAL;
225
226         old = reservation_object_get_list(resv);
227         if (!old)
228                 return 0;
229
230         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
231                       GFP_KERNEL);
232         if (!new)
233                 return -ENOMEM;
234
235         /* Go through all the shared fences in the reservation object and sort
236          * the interesting ones to the end of the list.
237          */
238         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
239                 struct dma_fence *f;
240
241                 f = rcu_dereference_protected(old->shared[i],
242                                               reservation_object_held(resv));
243
244                 if (f->context == ef->base.context)
245                         RCU_INIT_POINTER(new->shared[--j], f);
246                 else
247                         RCU_INIT_POINTER(new->shared[k++], f);
248         }
249         new->shared_max = old->shared_max;
250         new->shared_count = k;
251
252         /* Install the new fence list, seqcount provides the barriers */
253         preempt_disable();
254         write_seqcount_begin(&resv->seq);
255         RCU_INIT_POINTER(resv->fence, new);
256         write_seqcount_end(&resv->seq);
257         preempt_enable();
258
259         /* Drop the references to the removed fences (those sorted to the end) */
260         for (i = j, k = 0; i < old->shared_count; ++i) {
261                 struct dma_fence *f;
262
263                 f = rcu_dereference_protected(new->shared[i],
264                                               reservation_object_held(resv));
265                 dma_fence_put(f);
266         }
267         kfree_rcu(old, rcu);
268
269         return 0;
270 }
271
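/* Move a regular (non-userptr) BO into @domain and optionally wait for the
 * pending fences on its reservation object before returning.
 */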
272 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
273                                      bool wait)
274 {
275         struct ttm_operation_ctx ctx = { false, false };
276         int ret;
277
278         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
279                  "Called with userptr BO"))
280                 return -EINVAL;
281
282         amdgpu_bo_placement_from_domain(bo, domain);
283
284         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
285         if (ret)
286                 goto validate_fail;
287         if (wait)
288                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
289
290 validate_fail:
291         return ret;
292 }
293
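/* Validation callback used with amdgpu_vm_validate_pt_bos */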
294 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
295 {
296         struct amdgpu_vm_parser *p = param;
297
298         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
299 }
300
301 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
302  *
303  * Page directories are not updated here because huge page handling
304  * during page table updates can invalidate page directory entries
305  * again. Page directories are only updated after updating page
306  * tables.
307  */
308 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
309 {
310         struct amdgpu_bo *pd = vm->root.base.bo;
311         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
312         struct amdgpu_vm_parser param;
313         int ret;
314
315         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
316         param.wait = false;
317
318         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
319                                         &param);
320         if (ret) {
321                 pr_err("amdgpu: failed to validate PT BOs\n");
322                 return ret;
323         }
324
325         ret = amdgpu_amdkfd_validate(&param, pd);
326         if (ret) {
327                 pr_err("amdgpu: failed to validate PD\n");
328                 return ret;
329         }
330
331         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
332
333         if (vm->use_cpu_for_update) {
334                 ret = amdgpu_bo_kmap(pd, NULL);
335                 if (ret) {
336                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
337                         return ret;
338                 }
339         }
340
341         return 0;
342 }
343
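/* Update the VM's page directories and add the resulting update fence to the
 * sync object.
 */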
344 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
345 {
346         struct amdgpu_bo *pd = vm->root.base.bo;
347         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
348         int ret;
349
350         ret = amdgpu_vm_update_directories(adev, vm);
351         if (ret)
352                 return ret;
353
354         return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
355 }
356
357 /* add_bo_to_vm - Add a BO to a VM
358  *
359  * Everything that needs to be done only once when a BO is first added
360  * to a VM. It can later be mapped and unmapped many times without
361  * repeating these steps.
362  *
363  * 1. Allocate and initialize BO VA entry data structure
364  * 2. Add BO to the VM
365  * 3. Determine ASIC-specific PTE flags
366  * 4. Alloc page tables and directories if needed
367  * 4a.  Validate new page tables and directories
368  */
369 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
370                 struct amdgpu_vm *vm, bool is_aql,
371                 struct kfd_bo_va_list **p_bo_va_entry)
372 {
373         int ret;
374         struct kfd_bo_va_list *bo_va_entry;
375         struct amdgpu_bo *bo = mem->bo;
376         uint64_t va = mem->va;
377         struct list_head *list_bo_va = &mem->bo_va_list;
378         unsigned long bo_size = bo->tbo.mem.size;
379
380         if (!va) {
381                 pr_err("Invalid VA when adding BO to VM\n");
382                 return -EINVAL;
383         }
384
385         if (is_aql)
386                 va += bo_size;
387
388         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
389         if (!bo_va_entry)
390                 return -ENOMEM;
391
392         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
393                         va + bo_size, vm);
394
395         /* Add BO to VM internal data structures */
396         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
397         if (!bo_va_entry->bo_va) {
398                 ret = -EINVAL;
399                 pr_err("Failed to add BO object to VM. ret == %d\n",
400                                 ret);
401                 goto err_vmadd;
402         }
403
404         bo_va_entry->va = va;
405         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
406                                                          mem->mapping_flags);
407         bo_va_entry->kgd_dev = (void *)adev;
408         list_add(&bo_va_entry->bo_list, list_bo_va);
409
410         if (p_bo_va_entry)
411                 *p_bo_va_entry = bo_va_entry;
412
413         /* Allocate new page tables if needed and validate
414          * them.
415          */
416         ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
417         if (ret) {
418                 pr_err("Failed to allocate pts, err=%d\n", ret);
419                 goto err_alloc_pts;
420         }
421
422         ret = vm_validate_pt_pd_bos(vm);
423         if (ret) {
424                 pr_err("validate_pt_pd_bos() failed\n");
425                 goto err_alloc_pts;
426         }
427
428         return 0;
429
430 err_alloc_pts:
431         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
432         list_del(&bo_va_entry->bo_list);
433 err_vmadd:
434         kfree(bo_va_entry);
435         return ret;
436 }
437
438 static void remove_bo_from_vm(struct amdgpu_device *adev,
439                 struct kfd_bo_va_list *entry, unsigned long size)
440 {
441         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
442                         entry->va,
443                         entry->va + size, entry);
444         amdgpu_vm_bo_rmv(adev, entry->bo_va);
445         list_del(&entry->bo_list);
446         kfree(entry);
447 }
448
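/* Add the BO's validation entry to the process's kfd_bo_list, or to the
 * userptr_valid_list for userptr BOs, under the process_info lock.
 */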
449 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
450                                 struct amdkfd_process_info *process_info,
451                                 bool userptr)
452 {
453         struct ttm_validate_buffer *entry = &mem->validate_list;
454         struct amdgpu_bo *bo = mem->bo;
455
456         INIT_LIST_HEAD(&entry->head);
457         entry->num_shared = 1;
458         entry->bo = &bo->tbo;
459         mutex_lock(&process_info->lock);
460         if (userptr)
461                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
462         else
463                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
464         mutex_unlock(&process_info->lock);
465 }
466
467 /* Initializes user pages. It registers the MMU notifier and validates
468  * the userptr BO in the GTT domain.
469  *
470  * The BO must already be on the userptr_valid_list. Otherwise an
471  * eviction and restore may happen that leaves the new BO unmapped
472  * with the user mode queues running.
473  *
474  * Takes the process_info->lock to protect against concurrent restore
475  * workers.
476  *
477  * Returns 0 for success, negative errno for errors.
478  */
479 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
480                            uint64_t user_addr)
481 {
482         struct amdkfd_process_info *process_info = mem->process_info;
483         struct amdgpu_bo *bo = mem->bo;
484         struct ttm_operation_ctx ctx = { true, false };
485         int ret = 0;
486
487         mutex_lock(&process_info->lock);
488
489         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
490         if (ret) {
491                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
492                 goto out;
493         }
494
495         ret = amdgpu_mn_register(bo, user_addr);
496         if (ret) {
497                 pr_err("%s: Failed to register MMU notifier: %d\n",
498                        __func__, ret);
499                 goto out;
500         }
501
502         /* If no restore worker is running concurrently, user_pages
503          * should not be allocated
504          */
505         WARN(mem->user_pages, "Leaking user_pages array");
506
507         mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
508                                            sizeof(struct page *),
509                                            GFP_KERNEL | __GFP_ZERO);
510         if (!mem->user_pages) {
511                 pr_err("%s: Failed to allocate pages array\n", __func__);
512                 ret = -ENOMEM;
513                 goto unregister_out;
514         }
515
516         ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
517         if (ret) {
518                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
519                 goto free_out;
520         }
521
522         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
523
524         ret = amdgpu_bo_reserve(bo, true);
525         if (ret) {
526                 pr_err("%s: Failed to reserve BO\n", __func__);
527                 goto release_out;
528         }
529         amdgpu_bo_placement_from_domain(bo, mem->domain);
530         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
531         if (ret)
532                 pr_err("%s: failed to validate BO\n", __func__);
533         amdgpu_bo_unreserve(bo);
534
535 release_out:
536         if (ret)
537                 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
538 free_out:
539         kvfree(mem->user_pages);
540         mem->user_pages = NULL;
541 unregister_out:
542         if (ret)
543                 amdgpu_mn_unregister(bo);
544 out:
545         mutex_unlock(&process_info->lock);
546         return ret;
547 }
548
549 /* Reserving a BO and its page table BOs must happen atomically to
550  * avoid deadlocks. Some operations update multiple VMs at once. Track
551  * all the reservation info in a context structure. Optionally a sync
552  * object can track VM updates.
553  */
554 struct bo_vm_reservation_context {
555         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
556         unsigned int n_vms;                 /* Number of VMs reserved       */
557         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
558         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
559         struct list_head list, duplicates;  /* BO lists                     */
560         struct amdgpu_sync *sync;           /* Pointer to sync object       */
561         bool reserved;                      /* Whether BOs are reserved     */
562 };
563
564 enum bo_vm_match {
565         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
566         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
567         BO_VM_ALL,              /* Match all VMs a BO was added to    */
568 };
569
570 /**
571  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
572  * @mem: KFD BO structure.
573  * @vm: the VM to reserve.
574  * @ctx: the struct that will be used in unreserve_bo_and_vms().
575  */
576 static int reserve_bo_and_vm(struct kgd_mem *mem,
577                               struct amdgpu_vm *vm,
578                               struct bo_vm_reservation_context *ctx)
579 {
580         struct amdgpu_bo *bo = mem->bo;
581         int ret;
582
583         WARN_ON(!vm);
584
585         ctx->reserved = false;
586         ctx->n_vms = 1;
587         ctx->sync = &mem->sync;
588
589         INIT_LIST_HEAD(&ctx->list);
590         INIT_LIST_HEAD(&ctx->duplicates);
591
592         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
593         if (!ctx->vm_pd)
594                 return -ENOMEM;
595
596         ctx->kfd_bo.priority = 0;
597         ctx->kfd_bo.tv.bo = &bo->tbo;
598         ctx->kfd_bo.tv.num_shared = 1;
599         ctx->kfd_bo.user_pages = NULL;
600         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
601
602         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
603
604         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
605                                      false, &ctx->duplicates);
606         if (!ret)
607                 ctx->reserved = true;
608         else {
609                 pr_err("Failed to reserve buffers in ttm\n");
610                 kfree(ctx->vm_pd);
611                 ctx->vm_pd = NULL;
612         }
613
614         return ret;
615 }
616
617 /**
618  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
619  * @mem: KFD BO structure.
620  * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
621  * considered. Otherwise, only the given VM is considered.
622  * @map_type: the mapping status that will be used to filter the VMs.
623  * @ctx: the struct that will be used in unreserve_bo_and_vms().
624  *
625  * Returns 0 for success, negative for failure.
626  */
627 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
628                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
629                                 struct bo_vm_reservation_context *ctx)
630 {
631         struct amdgpu_bo *bo = mem->bo;
632         struct kfd_bo_va_list *entry;
633         unsigned int i;
634         int ret;
635
636         ctx->reserved = false;
637         ctx->n_vms = 0;
638         ctx->vm_pd = NULL;
639         ctx->sync = &mem->sync;
640
641         INIT_LIST_HEAD(&ctx->list);
642         INIT_LIST_HEAD(&ctx->duplicates);
643
644         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
645                 if ((vm && vm != entry->bo_va->base.vm) ||
646                         (entry->is_mapped != map_type
647                         && map_type != BO_VM_ALL))
648                         continue;
649
650                 ctx->n_vms++;
651         }
652
653         if (ctx->n_vms != 0) {
654                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
655                                      GFP_KERNEL);
656                 if (!ctx->vm_pd)
657                         return -ENOMEM;
658         }
659
660         ctx->kfd_bo.priority = 0;
661         ctx->kfd_bo.tv.bo = &bo->tbo;
662         ctx->kfd_bo.tv.num_shared = 1;
663         ctx->kfd_bo.user_pages = NULL;
664         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
665
666         i = 0;
667         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
668                 if ((vm && vm != entry->bo_va->base.vm) ||
669                         (entry->is_mapped != map_type
670                         && map_type != BO_VM_ALL))
671                         continue;
672
673                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
674                                 &ctx->vm_pd[i]);
675                 i++;
676         }
677
678         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
679                                      false, &ctx->duplicates);
680         if (!ret)
681                 ctx->reserved = true;
682         else
683                 pr_err("Failed to reserve buffers in ttm.\n");
684
685         if (ret) {
686                 kfree(ctx->vm_pd);
687                 ctx->vm_pd = NULL;
688         }
689
690         return ret;
691 }
692
693 /**
694  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
695  * @ctx: Reservation context to unreserve
696  * @wait: Optionally wait for a sync object representing pending VM updates
697  * @intr: Whether the wait is interruptible
698  *
699  * Also frees any resources allocated in
700  * reserve_bo_and_(cond_)vm(s). Returns the status from
701  * amdgpu_sync_wait.
702  */
703 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
704                                  bool wait, bool intr)
705 {
706         int ret = 0;
707
708         if (wait)
709                 ret = amdgpu_sync_wait(ctx->sync, intr);
710
711         if (ctx->reserved)
712                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
713         kfree(ctx->vm_pd);
714
715         ctx->sync = NULL;
716
717         ctx->reserved = false;
718         ctx->vm_pd = NULL;
719
720         return ret;
721 }
722
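/* Remove the BO's mapping at entry->va from the VM, clear the freed mapping
 * from the page tables and add the update fence to the sync object.
 */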
723 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
724                                 struct kfd_bo_va_list *entry,
725                                 struct amdgpu_sync *sync)
726 {
727         struct amdgpu_bo_va *bo_va = entry->bo_va;
728         struct amdgpu_vm *vm = bo_va->base.vm;
729
730         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
731
732         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
733
734         amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
735
736         return 0;
737 }
738
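/* Update the page table entries for an existing mapping and add the
 * last_pt_update fence to the sync object.
 */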
739 static int update_gpuvm_pte(struct amdgpu_device *adev,
740                 struct kfd_bo_va_list *entry,
741                 struct amdgpu_sync *sync)
742 {
743         int ret;
744         struct amdgpu_vm *vm;
745         struct amdgpu_bo_va *bo_va;
746         struct amdgpu_bo *bo;
747
748         bo_va = entry->bo_va;
749         vm = bo_va->base.vm;
750         bo = bo_va->base.bo;
751
752         /* Update the page tables  */
753         ret = amdgpu_vm_bo_update(adev, bo_va, false);
754         if (ret) {
755                 pr_err("amdgpu_vm_bo_update failed\n");
756                 return ret;
757         }
758
759         return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
760 }
761
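/* Create the VA mapping for the BO in the VM and, unless @no_update_pte is
 * set, update the page tables right away.
 */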
762 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
763                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
764                 bool no_update_pte)
765 {
766         int ret;
767
768         /* Set virtual address for the allocation */
769         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
770                                amdgpu_bo_size(entry->bo_va->base.bo),
771                                entry->pte_flags);
772         if (ret) {
773                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
774                                 entry->va, ret);
775                 return ret;
776         }
777
778         if (no_update_pte)
779                 return 0;
780
781         ret = update_gpuvm_pte(adev, entry, sync);
782         if (ret) {
783                 pr_err("update_gpuvm_pte() failed\n");
784                 goto update_gpuvm_pte_failed;
785         }
786
787         return 0;
788
789 update_gpuvm_pte_failed:
790         unmap_bo_from_gpuvm(adev, entry, sync);
791         return ret;
792 }
793
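/* Build a single-entry sg table describing a doorbell aperture with the given
 * DMA address and size, used for ttm_bo_type_sg doorbell BOs.
 */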
794 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
795 {
796         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
797
798         if (!sg)
799                 return NULL;
800         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
801                 kfree(sg);
802                 return NULL;
803         }
804         sg->sgl->dma_address = addr;
805         sg->sgl->length = size;
806 #ifdef CONFIG_NEED_SG_DMA_LENGTH
807         sg->sgl->dma_length = size;
808 #endif
809         return sg;
810 }
811
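/* Validate the page table and page directory BOs of every VM that belongs to
 * the process.
 */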
812 static int process_validate_vms(struct amdkfd_process_info *process_info)
813 {
814         struct amdgpu_vm *peer_vm;
815         int ret;
816
817         list_for_each_entry(peer_vm, &process_info->vm_list_head,
818                             vm_list_node) {
819                 ret = vm_validate_pt_pd_bos(peer_vm);
820                 if (ret)
821                         return ret;
822         }
823
824         return 0;
825 }
826
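/* Collect the fences from the page directory reservation objects of all VMs
 * in the process into the sync object.
 */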
827 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
828                                  struct amdgpu_sync *sync)
829 {
830         struct amdgpu_vm *peer_vm;
831         int ret;
832
833         list_for_each_entry(peer_vm, &process_info->vm_list_head,
834                             vm_list_node) {
835                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
836
837                 ret = amdgpu_sync_resv(NULL,
838                                         sync, pd->tbo.resv,
839                                         AMDGPU_FENCE_OWNER_UNDEFINED, false);
840                 if (ret)
841                         return ret;
842         }
843
844         return 0;
845 }
846
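/* Update the page directories of all VMs in the process and track the update
 * fences in the sync object.
 */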
847 static int process_update_pds(struct amdkfd_process_info *process_info,
848                               struct amdgpu_sync *sync)
849 {
850         struct amdgpu_vm *peer_vm;
851         int ret;
852
853         list_for_each_entry(peer_vm, &process_info->vm_list_head,
854                             vm_list_node) {
855                 ret = vm_update_pds(peer_vm, sync);
856                 if (ret)
857                         return ret;
858         }
859
860         return 0;
861 }
862
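/* Initialize the KFD part of a VM: create the shared process info and
 * eviction fence on first use, validate the page directory, attach the
 * eviction fence to it and add the VM to the process's VM list.
 */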
863 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
864                        struct dma_fence **ef)
865 {
866         struct amdkfd_process_info *info = NULL;
867         int ret;
868
869         if (!*process_info) {
870                 info = kzalloc(sizeof(*info), GFP_KERNEL);
871                 if (!info)
872                         return -ENOMEM;
873
874                 mutex_init(&info->lock);
875                 INIT_LIST_HEAD(&info->vm_list_head);
876                 INIT_LIST_HEAD(&info->kfd_bo_list);
877                 INIT_LIST_HEAD(&info->userptr_valid_list);
878                 INIT_LIST_HEAD(&info->userptr_inval_list);
879
880                 info->eviction_fence =
881                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
882                                                    current->mm);
883                 if (!info->eviction_fence) {
884                         pr_err("Failed to create eviction fence\n");
885                         ret = -ENOMEM;
886                         goto create_evict_fence_fail;
887                 }
888
889                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
890                 atomic_set(&info->evicted_bos, 0);
891                 INIT_DELAYED_WORK(&info->restore_userptr_work,
892                                   amdgpu_amdkfd_restore_userptr_worker);
893
894                 *process_info = info;
895                 *ef = dma_fence_get(&info->eviction_fence->base);
896         }
897
898         vm->process_info = *process_info;
899
900         /* Validate page directory and attach eviction fence */
901         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
902         if (ret)
903                 goto reserve_pd_fail;
904         ret = vm_validate_pt_pd_bos(vm);
905         if (ret) {
906                 pr_err("validate_pt_pd_bos() failed\n");
907                 goto validate_pd_fail;
908         }
909         ret = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
910         if (ret)
911                 goto wait_pd_fail;
912         amdgpu_bo_fence(vm->root.base.bo,
913                         &vm->process_info->eviction_fence->base, true);
914         amdgpu_bo_unreserve(vm->root.base.bo);
915
916         /* Update process info */
917         mutex_lock(&vm->process_info->lock);
918         list_add_tail(&vm->vm_list_node,
919                         &(vm->process_info->vm_list_head));
920         vm->process_info->n_vms++;
921         mutex_unlock(&vm->process_info->lock);
922
923         return 0;
924
925 wait_pd_fail:
926 validate_pd_fail:
927         amdgpu_bo_unreserve(vm->root.base.bo);
928 reserve_pd_fail:
929         vm->process_info = NULL;
930         if (info) {
931                 /* Two fence references: one in info and one in *ef */
932                 dma_fence_put(&info->eviction_fence->base);
933                 dma_fence_put(*ef);
934                 *ef = NULL;
935                 *process_info = NULL;
936                 put_pid(info->pid);
937 create_evict_fence_fail:
938                 mutex_destroy(&info->lock);
939                 kfree(info);
940         }
941         return ret;
942 }
943
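/* Create a new VM for a KFD process: allocate and initialize an amdgpu VM in
 * compute context and set up its KFD process info.
 */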
944 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
945                                           void **vm, void **process_info,
946                                           struct dma_fence **ef)
947 {
948         struct amdgpu_device *adev = get_amdgpu_device(kgd);
949         struct amdgpu_vm *new_vm;
950         int ret;
951
952         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
953         if (!new_vm)
954                 return -ENOMEM;
955
956         /* Initialize AMDGPU part of the VM */
957         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
958         if (ret) {
959                 pr_err("Failed init vm ret %d\n", ret);
960                 goto amdgpu_vm_init_fail;
961         }
962
963         /* Initialize KFD part of the VM and process info */
964         ret = init_kfd_vm(new_vm, process_info, ef);
965         if (ret)
966                 goto init_kfd_vm_fail;
967
968         *vm = (void *) new_vm;
969
970         return 0;
971
972 init_kfd_vm_fail:
973         amdgpu_vm_fini(adev, new_vm);
974 amdgpu_vm_init_fail:
975         kfree(new_vm);
976         return ret;
977 }
978
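/* Convert the graphics VM belonging to a DRM file into a compute VM and set
 * up its KFD process info. Fails if the VM is already a compute VM.
 */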
979 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
980                                            struct file *filp, unsigned int pasid,
981                                            void **vm, void **process_info,
982                                            struct dma_fence **ef)
983 {
984         struct amdgpu_device *adev = get_amdgpu_device(kgd);
985         struct drm_file *drm_priv = filp->private_data;
986         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
987         struct amdgpu_vm *avm = &drv_priv->vm;
988         int ret;
989
990         /* Already a compute VM? */
991         if (avm->process_info)
992                 return -EINVAL;
993
994         /* Convert VM into a compute VM */
995         ret = amdgpu_vm_make_compute(adev, avm, pasid);
996         if (ret)
997                 return ret;
998
999         /* Initialize KFD part of the VM and process info */
1000         ret = init_kfd_vm(avm, process_info, ef);
1001         if (ret)
1002                 return ret;
1003
1004         *vm = (void *)avm;
1005
1006         return 0;
1007 }
1008
1009 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1010                                     struct amdgpu_vm *vm)
1011 {
1012         struct amdkfd_process_info *process_info = vm->process_info;
1013         struct amdgpu_bo *pd = vm->root.base.bo;
1014
1015         if (!process_info)
1016                 return;
1017
1018         /* Release eviction fence from PD */
1019         amdgpu_bo_reserve(pd, false);
1020         amdgpu_bo_fence(pd, NULL, false);
1021         amdgpu_bo_unreserve(pd);
1022
1023         /* Update process info */
1024         mutex_lock(&process_info->lock);
1025         process_info->n_vms--;
1026         list_del(&vm->vm_list_node);
1027         mutex_unlock(&process_info->lock);
1028
1029         /* Release per-process resources when last compute VM is destroyed */
1030         if (!process_info->n_vms) {
1031                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1032                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1033                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1034
1035                 dma_fence_put(&process_info->eviction_fence->base);
1036                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1037                 put_pid(process_info->pid);
1038                 mutex_destroy(&process_info->lock);
1039                 kfree(process_info);
1040         }
1041 }
1042
1043 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1044 {
1045         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1046         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1047
1048         if (WARN_ON(!kgd || !vm))
1049                 return;
1050
1051         pr_debug("Destroying process vm %p\n", vm);
1052
1053         /* Release the VM context */
1054         amdgpu_vm_fini(adev, avm);
1055         kfree(vm);
1056 }
1057
1058 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1059 {
1060         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1061         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1062
1063         if (WARN_ON(!kgd || !vm))
1064                 return;
1065
1066         pr_debug("Releasing process vm %p\n", vm);
1067
1068         /* The original pasid of the amdgpu vm was already released
1069          * when the vm was converted to a compute vm. The current
1070          * pasid is managed by KFD and will be released on KFD
1071          * process destroy. Set the amdgpu pasid to 0 to avoid a
1072          * duplicate release.
1073          */
1074         amdgpu_vm_release_compute(adev, avm);
1075 }
1076
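/* Return the process page directory address; on ASICs older than Vega10 it is
 * returned as a page frame number rather than a byte address.
 */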
1077 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1078 {
1079         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1080         struct amdgpu_bo *pd = avm->root.base.bo;
1081         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1082
1083         if (adev->asic_type < CHIP_VEGA10)
1084                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1085         return avm->pd_phys_addr;
1086 }
1087
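/* Allocate a KFD BO in VRAM, GTT, userptr or doorbell memory depending on
 * @flags, charge it against the memory limits and add it to the process's
 * BO list. The BO is not mapped into any GPU VM yet.
 */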
1088 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1089                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1090                 void *vm, struct kgd_mem **mem,
1091                 uint64_t *offset, uint32_t flags)
1092 {
1093         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1094         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1095         enum ttm_bo_type bo_type = ttm_bo_type_device;
1096         struct sg_table *sg = NULL;
1097         uint64_t user_addr = 0;
1098         struct amdgpu_bo *bo;
1099         struct amdgpu_bo_param bp;
1100         int byte_align;
1101         u32 domain, alloc_domain;
1102         u64 alloc_flags;
1103         uint32_t mapping_flags;
1104         int ret;
1105
1106         /*
1107          * Check on which domain to allocate BO
1108          */
1109         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1110                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1111                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1112                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1113                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1114                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1115         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1116                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1117                 alloc_flags = 0;
1118         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1119                 domain = AMDGPU_GEM_DOMAIN_GTT;
1120                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1121                 alloc_flags = 0;
1122                 if (!offset || !*offset)
1123                         return -EINVAL;
1124                 user_addr = *offset;
1125         } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
1126                 domain = AMDGPU_GEM_DOMAIN_GTT;
1127                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1128                 bo_type = ttm_bo_type_sg;
1129                 alloc_flags = 0;
1130                 if (size > UINT_MAX)
1131                         return -EINVAL;
1132                 sg = create_doorbell_sg(*offset, size);
1133                 if (!sg)
1134                         return -ENOMEM;
1135         } else {
1136                 return -EINVAL;
1137         }
1138
1139         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1140         if (!*mem) {
1141                 ret = -ENOMEM;
1142                 goto err;
1143         }
1144         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1145         mutex_init(&(*mem)->lock);
1146         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1147
1148         /* Workaround for AQL queue wraparound bug. Map the same
1149          * memory twice. That means we only actually allocate half
1150          * the memory.
1151          */
1152         if ((*mem)->aql_queue)
1153                 size = size >> 1;
1154
1155         /* Workaround for TLB bug on older VI chips */
1156         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1157                         adev->asic_type != CHIP_FIJI &&
1158                         adev->asic_type != CHIP_POLARIS10 &&
1159                         adev->asic_type != CHIP_POLARIS11 &&
1160                         adev->asic_type != CHIP_POLARIS12) ?
1161                         VI_BO_SIZE_ALIGN : 1;
1162
1163         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1164         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1165                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1166         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1167                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1168         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1169                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1170         else
1171                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1172         (*mem)->mapping_flags = mapping_flags;
1173
1174         amdgpu_sync_create(&(*mem)->sync);
1175
1176         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1177         if (ret) {
1178                 pr_debug("Insufficient system memory\n");
1179                 goto err_reserve_limit;
1180         }
1181
1182         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1183                         va, size, domain_string(alloc_domain));
1184
1185         memset(&bp, 0, sizeof(bp));
1186         bp.size = size;
1187         bp.byte_align = byte_align;
1188         bp.domain = alloc_domain;
1189         bp.flags = alloc_flags;
1190         bp.type = bo_type;
1191         bp.resv = NULL;
1192         ret = amdgpu_bo_create(adev, &bp, &bo);
1193         if (ret) {
1194                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1195                                 domain_string(alloc_domain), ret);
1196                 goto err_bo_create;
1197         }
1198         if (bo_type == ttm_bo_type_sg) {
1199                 bo->tbo.sg = sg;
1200                 bo->tbo.ttm->sg = sg;
1201         }
1202         bo->kfd_bo = *mem;
1203         (*mem)->bo = bo;
1204         if (user_addr)
1205                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1206
1207         (*mem)->va = va;
1208         (*mem)->domain = domain;
1209         (*mem)->mapped_to_gpu_memory = 0;
1210         (*mem)->process_info = avm->process_info;
1211         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1212
1213         if (user_addr) {
1214                 ret = init_user_pages(*mem, current->mm, user_addr);
1215                 if (ret) {
1216                         mutex_lock(&avm->process_info->lock);
1217                         list_del(&(*mem)->validate_list.head);
1218                         mutex_unlock(&avm->process_info->lock);
1219                         goto allocate_init_user_pages_failed;
1220                 }
1221         }
1222
1223         if (offset)
1224                 *offset = amdgpu_bo_mmap_offset(bo);
1225
1226         return 0;
1227
1228 allocate_init_user_pages_failed:
1229         amdgpu_bo_unref(&bo);
1230         /* Don't unreserve system mem limit twice */
1231         goto err_reserve_limit;
1232 err_bo_create:
1233         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1234 err_reserve_limit:
1235         mutex_destroy(&(*mem)->lock);
1236         kfree(*mem);
1237 err:
1238         if (sg) {
1239                 sg_free_table(sg);
1240                 kfree(sg);
1241         }
1242         return ret;
1243 }
1244
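/* Free a KFD BO: fails with -EBUSY while the BO is still mapped on any GPU;
 * otherwise removes it from all VMs and process lists and releases it.
 */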
1245 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1246                 struct kgd_dev *kgd, struct kgd_mem *mem)
1247 {
1248         struct amdkfd_process_info *process_info = mem->process_info;
1249         unsigned long bo_size = mem->bo->tbo.mem.size;
1250         struct kfd_bo_va_list *entry, *tmp;
1251         struct bo_vm_reservation_context ctx;
1252         struct ttm_validate_buffer *bo_list_entry;
1253         int ret;
1254
1255         mutex_lock(&mem->lock);
1256
1257         if (mem->mapped_to_gpu_memory > 0) {
1258                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1259                                 mem->va, bo_size);
1260                 mutex_unlock(&mem->lock);
1261                 return -EBUSY;
1262         }
1263
1264         mutex_unlock(&mem->lock);
1265         /* lock is not needed after this, since mem is unused and will
1266          * be freed anyway
1267          */
1268
1269         /* No more MMU notifiers */
1270         amdgpu_mn_unregister(mem->bo);
1271
1272         /* Make sure restore workers don't access the BO any more */
1273         bo_list_entry = &mem->validate_list;
1274         mutex_lock(&process_info->lock);
1275         list_del(&bo_list_entry->head);
1276         mutex_unlock(&process_info->lock);
1277
1278         /* Free user pages if necessary */
1279         if (mem->user_pages) {
1280                 pr_debug("%s: Freeing user_pages array\n", __func__);
1281                 if (mem->user_pages[0])
1282                         release_pages(mem->user_pages,
1283                                         mem->bo->tbo.ttm->num_pages);
1284                 kvfree(mem->user_pages);
1285         }
1286
1287         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1288         if (unlikely(ret))
1289                 return ret;
1290
1291         /* The eviction fence should be removed by the last unmap.
1292          * TODO: Log an error condition if the bo still has the eviction fence
1293          * attached
1294          */
1295         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1296                                         process_info->eviction_fence);
1297         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1298                 mem->va + bo_size * (1 + mem->aql_queue));
1299
1300         /* Remove from VM internal data structures */
1301         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1302                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1303                                 entry, bo_size);
1304
1305         ret = unreserve_bo_and_vms(&ctx, false, false);
1306
1307         /* Free the sync object */
1308         amdgpu_sync_free(&mem->sync);
1309
1310         /* If the SG is not NULL, it's one we created for a doorbell
1311          * BO. We need to free it.
1312          */
1313         if (mem->bo->tbo.sg) {
1314                 sg_free_table(mem->bo->tbo.sg);
1315                 kfree(mem->bo->tbo.sg);
1316         }
1317
1318         /* Free the BO */
1319         amdgpu_bo_unref(&mem->bo);
1320         mutex_destroy(&mem->lock);
1321         kfree(mem);
1322
1323         return ret;
1324 }
1325
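/* Map a KFD BO into the given GPU VM. The BO is added to the VM on first use,
 * validated (except for invalid userptrs, which the restore worker handles),
 * its page tables are updated and the eviction fence is attached.
 */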
1326 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1327                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1328 {
1329         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1330         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1331         int ret;
1332         struct amdgpu_bo *bo;
1333         uint32_t domain;
1334         struct kfd_bo_va_list *entry;
1335         struct bo_vm_reservation_context ctx;
1336         struct kfd_bo_va_list *bo_va_entry = NULL;
1337         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1338         unsigned long bo_size;
1339         bool is_invalid_userptr = false;
1340
1341         bo = mem->bo;
1342         if (!bo) {
1343                 pr_err("Invalid BO when mapping memory to GPU\n");
1344                 return -EINVAL;
1345         }
1346
1347         /* Make sure restore is not running concurrently. Since we
1348          * don't map invalid userptr BOs, we rely on the next restore
1349          * worker to do the mapping
1350          */
1351         mutex_lock(&mem->process_info->lock);
1352
1353         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1354          * sure that the MMU notifier is no longer running
1355          * concurrently and the queues are actually stopped
1356          */
1357         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1358                 down_write(&current->mm->mmap_sem);
1359                 is_invalid_userptr = atomic_read(&mem->invalid);
1360                 up_write(&current->mm->mmap_sem);
1361         }
1362
1363         mutex_lock(&mem->lock);
1364
1365         domain = mem->domain;
1366         bo_size = bo->tbo.mem.size;
1367
1368         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1369                         mem->va,
1370                         mem->va + bo_size * (1 + mem->aql_queue),
1371                         vm, domain_string(domain));
1372
1373         ret = reserve_bo_and_vm(mem, vm, &ctx);
1374         if (unlikely(ret))
1375                 goto out;
1376
1377         /* Userptr can be marked as "not invalid", but not actually be
1378          * validated yet (still in the system domain). In that case
1379          * the queues are still stopped and we can leave mapping for
1380          * the next restore worker
1381          */
1382         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1383             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1384                 is_invalid_userptr = true;
1385
1386         if (check_if_add_bo_to_vm(avm, mem)) {
1387                 ret = add_bo_to_vm(adev, mem, avm, false,
1388                                 &bo_va_entry);
1389                 if (ret)
1390                         goto add_bo_to_vm_failed;
1391                 if (mem->aql_queue) {
1392                         ret = add_bo_to_vm(adev, mem, avm,
1393                                         true, &bo_va_entry_aql);
1394                         if (ret)
1395                                 goto add_bo_to_vm_failed_aql;
1396                 }
1397         } else {
1398                 ret = vm_validate_pt_pd_bos(avm);
1399                 if (unlikely(ret))
1400                         goto add_bo_to_vm_failed;
1401         }
1402
1403         if (mem->mapped_to_gpu_memory == 0 &&
1404             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1405                 /* Validate BO only once. The eviction fence gets added to BO
1406                  * the first time it is mapped. Validate will wait for all
1407                  * background evictions to complete.
1408                  */
1409                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1410                 if (ret) {
1411                         pr_debug("Validate failed\n");
1412                         goto map_bo_to_gpuvm_failed;
1413                 }
1414         }
1415
1416         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1417                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1418                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1419                                         entry->va, entry->va + bo_size,
1420                                         entry);
1421
1422                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1423                                               is_invalid_userptr);
1424                         if (ret) {
1425                                 pr_err("Failed to map bo to gpuvm\n");
1426                                 goto map_bo_to_gpuvm_failed;
1427                         }
1428
1429                         ret = vm_update_pds(vm, ctx.sync);
1430                         if (ret) {
1431                                 pr_err("Failed to update page directories\n");
1432                                 goto map_bo_to_gpuvm_failed;
1433                         }
1434
1435                         entry->is_mapped = true;
1436                         mem->mapped_to_gpu_memory++;
1437                         pr_debug("\t INC mapping count %d\n",
1438                                         mem->mapped_to_gpu_memory);
1439                 }
1440         }
1441
1442         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1443                 amdgpu_bo_fence(bo,
1444                                 &avm->process_info->eviction_fence->base,
1445                                 true);
1446         ret = unreserve_bo_and_vms(&ctx, false, false);
1447
1448         goto out;
1449
1450 map_bo_to_gpuvm_failed:
1451         if (bo_va_entry_aql)
1452                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1453 add_bo_to_vm_failed_aql:
1454         if (bo_va_entry)
1455                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1456 add_bo_to_vm_failed:
1457         unreserve_bo_and_vms(&ctx, false, false);
1458 out:
1459         mutex_unlock(&mem->process_info->lock);
1460         mutex_unlock(&mem->lock);
1461         return ret;
1462 }
1463
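/* Unmap a KFD BO from the given GPU VM; once the BO is no longer mapped
 * anywhere, the eviction fence is removed so the BO can be evicted.
 */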
1464 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1465                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1466 {
1467         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1468         struct amdkfd_process_info *process_info =
1469                 ((struct amdgpu_vm *)vm)->process_info;
1470         unsigned long bo_size = mem->bo->tbo.mem.size;
1471         struct kfd_bo_va_list *entry;
1472         struct bo_vm_reservation_context ctx;
1473         int ret;
1474
1475         mutex_lock(&mem->lock);
1476
1477         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1478         if (unlikely(ret))
1479                 goto out;
1480         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1481         if (ctx.n_vms == 0) {
1482                 ret = -EINVAL;
1483                 goto unreserve_out;
1484         }
1485
1486         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1487         if (unlikely(ret))
1488                 goto unreserve_out;
1489
1490         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1491                 mem->va,
1492                 mem->va + bo_size * (1 + mem->aql_queue),
1493                 vm);
1494
1495         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1496                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1497                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1498                                         entry->va,
1499                                         entry->va + bo_size,
1500                                         entry);
1501
1502                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1503                         if (ret) {
1504                                 pr_err("Failed to unmap VA 0x%llx\n",
1505                                                 mem->va);
1506                                 goto unreserve_out;
1507                         }
1508
1509                         entry->is_mapped = false;
1510
1511                         mem->mapped_to_gpu_memory--;
1512                         pr_debug("\t DEC mapping count %d\n",
1513                                         mem->mapped_to_gpu_memory);
1514                 }
1515         }
1516
1517         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1518          * required.
1519          */
1520         if (mem->mapped_to_gpu_memory == 0 &&
1521             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1522                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1523                                                 process_info->eviction_fence);
1524
1525 unreserve_out:
1526         unreserve_bo_and_vms(&ctx, false, false);
1527 out:
1528         mutex_unlock(&mem->lock);
1529         return ret;
1530 }
1531
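/* amdgpu_amdkfd_gpuvm_sync_memory - Wait for pending fences on a KFD BO
 *
 * The BO's sync object is cloned under mem->lock and the wait is done
 * on the clone, so the lock is not held while (possibly interruptibly)
 * waiting.
 */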
1532 int amdgpu_amdkfd_gpuvm_sync_memory(
1533                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1534 {
1535         struct amdgpu_sync sync;
1536         int ret;
1537
1538         amdgpu_sync_create(&sync);
1539
1540         mutex_lock(&mem->lock);
1541         amdgpu_sync_clone(&mem->sync, &sync);
1542         mutex_unlock(&mem->lock);
1543
1544         ret = amdgpu_sync_wait(&sync, intr);
1545         amdgpu_sync_free(&sync);
1546         return ret;
1547 }
1548
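/* amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel - Pin a BO in GTT and map it for the CPU
 *
 * Userptr BOs cannot be mapped to the kernel and are rejected. The BO
 * is pinned in the GTT domain and kmapped; its eviction fence is
 * removed and it is taken off the kfd_bo_list so it is neither evicted
 * nor re-validated while the kernel mapping exists. If size is
 * non-NULL, it returns the BO size.
 */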
1549 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1550                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1551 {
1552         int ret;
1553         struct amdgpu_bo *bo = mem->bo;
1554
1555         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1556                 pr_err("userptr can't be mapped to kernel\n");
1557                 return -EINVAL;
1558         }
1559
1560         /* Remove kgd_mem from the kfd_bo_list so that this BO is not
1561          * re-validated when BOs are restored after an eviction.
1562          */
1563         mutex_lock(&mem->process_info->lock);
1564
1565         ret = amdgpu_bo_reserve(bo, true);
1566         if (ret) {
1567                 pr_err("Failed to reserve bo. ret %d\n", ret);
1568                 goto bo_reserve_failed;
1569         }
1570
1571         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1572         if (ret) {
1573                 pr_err("Failed to pin bo. ret %d\n", ret);
1574                 goto pin_failed;
1575         }
1576
1577         ret = amdgpu_bo_kmap(bo, kptr);
1578         if (ret) {
1579                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1580                 goto kmap_failed;
1581         }
1582
1583         amdgpu_amdkfd_remove_eviction_fence(
1584                 bo, mem->process_info->eviction_fence);
1585         list_del_init(&mem->validate_list.head);
1586
1587         if (size)
1588                 *size = amdgpu_bo_size(bo);
1589
1590         amdgpu_bo_unreserve(bo);
1591
1592         mutex_unlock(&mem->process_info->lock);
1593         return 0;
1594
1595 kmap_failed:
1596         amdgpu_bo_unpin(bo);
1597 pin_failed:
1598         amdgpu_bo_unreserve(bo);
1599 bo_reserve_failed:
1600         mutex_unlock(&mem->process_info->lock);
1601
1602         return ret;
1603 }
1604
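/* amdgpu_amdkfd_gpuvm_get_vm_fault_info - Report the most recent GPUVM fault
 *
 * If new fault information has been recorded (vm_fault_info_updated is
 * set), copy it to *mem and clear the flag. The memory barrier orders
 * the copy before the flag is cleared.
 */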
1605 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1606                                               struct kfd_vm_fault_info *mem)
1607 {
1608         struct amdgpu_device *adev;
1609
1610         adev = (struct amdgpu_device *)kgd;
1611         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1612                 *mem = *adev->gmc.vm_fault_info;
1613                 mb();
1614                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1615         }
1616         return 0;
1617 }
1618
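/* amdgpu_amdkfd_gpuvm_import_dmabuf - Import an amdgpu DMA-buf as a kgd_mem
 *
 * Only buffers exported by the same amdgpu device and backed by VRAM or
 * GTT are accepted. The new kgd_mem takes a reference on the underlying
 * BO and is added to the process kfd_bo_list; it starts out unmapped,
 * so the caller still needs to map it to GPUVM.
 */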
1619 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1620                                       struct dma_buf *dma_buf,
1621                                       uint64_t va, void *vm,
1622                                       struct kgd_mem **mem, uint64_t *size,
1623                                       uint64_t *mmap_offset)
1624 {
1625         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1626         struct drm_gem_object *obj;
1627         struct amdgpu_bo *bo;
1628         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1629
1630         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1631                 /* Can't handle non-graphics buffers */
1632                 return -EINVAL;
1633
1634         obj = dma_buf->priv;
1635         if (obj->dev->dev_private != adev)
1636                 /* Can't handle buffers from other devices */
1637                 return -EINVAL;
1638
1639         bo = gem_to_amdgpu_bo(obj);
1640         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1641                                     AMDGPU_GEM_DOMAIN_GTT)))
1642                 /* Only VRAM and GTT BOs are supported */
1643                 return -EINVAL;
1644
1645         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1646         if (!*mem)
1647                 return -ENOMEM;
1648
1649         if (size)
1650                 *size = amdgpu_bo_size(bo);
1651
1652         if (mmap_offset)
1653                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1654
1655         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1656         mutex_init(&(*mem)->lock);
1657         (*mem)->mapping_flags =
1658                 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1659                 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1660
1661         (*mem)->bo = amdgpu_bo_ref(bo);
1662         (*mem)->va = va;
1663         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1664                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1665         (*mem)->mapped_to_gpu_memory = 0;
1666         (*mem)->process_info = avm->process_info;
1667         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1668         amdgpu_sync_create(&(*mem)->sync);
1669
1670         return 0;
1671 }
1672
1673 /* Evict a userptr BO by stopping the queues if necessary
1674  *
1675  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1676  * cannot do any memory allocations, and cannot take any locks that
1677  * are held elsewhere while allocating memory. Therefore this is as
1678  * simple as possible, using atomic counters.
1679  *
1680  * It doesn't do anything to the BO itself. The real work happens in
1681  * restore, where we get updated page addresses. This function only
1682  * ensures that GPU access to the BO is stopped.
1683  */
1684 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1685                                 struct mm_struct *mm)
1686 {
1687         struct amdkfd_process_info *process_info = mem->process_info;
1688         int invalid, evicted_bos;
1689         int r = 0;
1690
1691         invalid = atomic_inc_return(&mem->invalid);
1692         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1693         if (evicted_bos == 1) {
1694                 /* First eviction, stop the queues */
1695                 r = kgd2kfd_quiesce_mm(mm);
1696                 if (r)
1697                         pr_err("Failed to quiesce KFD\n");
1698                 schedule_delayed_work(&process_info->restore_userptr_work,
1699                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1700         }
1701
1702         return r;
1703 }
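
/* Rough summary of the userptr eviction/restore flow implemented below:
 *
 *   MMU notifier -> amdgpu_amdkfd_evict_userptr()
 *     - increment mem->invalid and process_info->evicted_bos
 *     - on the first eviction: kgd2kfd_quiesce_mm() and schedule
 *       restore_userptr_work
 *   delayed work -> amdgpu_amdkfd_restore_userptr_worker()
 *     - update_invalid_user_pages(): get updated user pages
 *     - validate_invalid_user_pages(): re-validate BOs and update PTEs
 *     - if no concurrent eviction happened: kgd2kfd_resume_mm(),
 *       otherwise reschedule another attempt
 */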
1704
1705 /* Update invalid userptr BOs
1706  *
1707  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1708  * userptr_inval_list and updates user pages for all BOs that have
1709  * been invalidated since their last update.
1710  */
1711 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1712                                      struct mm_struct *mm)
1713 {
1714         struct kgd_mem *mem, *tmp_mem;
1715         struct amdgpu_bo *bo;
1716         struct ttm_operation_ctx ctx = { false, false };
1717         int invalid, ret;
1718
1719         /* Move all invalidated BOs to the userptr_inval_list and
1720          * release their user pages by migration to the CPU domain
1721          */
1722         list_for_each_entry_safe(mem, tmp_mem,
1723                                  &process_info->userptr_valid_list,
1724                                  validate_list.head) {
1725                 if (!atomic_read(&mem->invalid))
1726                         continue; /* BO is still valid */
1727
1728                 bo = mem->bo;
1729
1730                 if (amdgpu_bo_reserve(bo, true))
1731                         return -EAGAIN;
1732                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1733                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1734                 amdgpu_bo_unreserve(bo);
1735                 if (ret) {
1736                         pr_err("%s: Failed to invalidate userptr BO\n",
1737                                __func__);
1738                         return -EAGAIN;
1739                 }
1740
1741                 list_move_tail(&mem->validate_list.head,
1742                                &process_info->userptr_inval_list);
1743         }
1744
1745         if (list_empty(&process_info->userptr_inval_list))
1746                 return 0; /* All evicted userptr BOs were freed */
1747
1748         /* Go through userptr_inval_list and update any invalid user_pages */
1749         list_for_each_entry(mem, &process_info->userptr_inval_list,
1750                             validate_list.head) {
1751                 invalid = atomic_read(&mem->invalid);
1752                 if (!invalid)
1753                         /* BO hasn't been invalidated since the last
1754                          * revalidation attempt. Keep its BO list.
1755                          */
1756                         continue;
1757
1758                 bo = mem->bo;
1759
1760                 if (!mem->user_pages) {
1761                         mem->user_pages =
1762                                 kvmalloc_array(bo->tbo.ttm->num_pages,
1763                                                  sizeof(struct page *),
1764                                                  GFP_KERNEL | __GFP_ZERO);
1765                         if (!mem->user_pages) {
1766                                 pr_err("%s: Failed to allocate pages array\n",
1767                                        __func__);
1768                                 return -ENOMEM;
1769                         }
1770                 } else if (mem->user_pages[0]) {
1771                         release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1772                 }
1773
1774                 /* Get updated user pages */
1775                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1776                                                    mem->user_pages);
1777                 if (ret) {
1778                         mem->user_pages[0] = NULL;
1779                         pr_info("%s: Failed to get user pages: %d\n",
1780                                 __func__, ret);
1781                         /* Pretend it succeeded. It will fail later
1782                          * with a VM fault if the GPU tries to access
1783                          * it. Better than hanging indefinitely with
1784                          * stalled user mode queues.
1785                          */
1786                 }
1787
1788                 /* Mark the BO as valid unless it was invalidated
1789                  * again concurrently
1790                  */
1791                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1792                         return -EAGAIN;
1793         }
1794
1795         return 0;
1796 }
1797
1798 /* Validate invalid userptr BOs
1799  *
1800  * Validates BOs on the userptr_inval_list, and moves them back to the
1801  * userptr_valid_list. Also updates GPUVM page tables with new page
1802  * addresses and waits for the page table updates to complete.
1803  */
1804 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1805 {
1806         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1807         struct list_head resv_list, duplicates;
1808         struct ww_acquire_ctx ticket;
1809         struct amdgpu_sync sync;
1810
1811         struct amdgpu_vm *peer_vm;
1812         struct kgd_mem *mem, *tmp_mem;
1813         struct amdgpu_bo *bo;
1814         struct ttm_operation_ctx ctx = { false, false };
1815         int i, ret;
1816
1817         pd_bo_list_entries = kcalloc(process_info->n_vms,
1818                                      sizeof(struct amdgpu_bo_list_entry),
1819                                      GFP_KERNEL);
1820         if (!pd_bo_list_entries) {
1821                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1822                 return -ENOMEM;
1823         }
1824
1825         INIT_LIST_HEAD(&resv_list);
1826         INIT_LIST_HEAD(&duplicates);
1827
1828         /* Get all the page directory BOs that need to be reserved */
1829         i = 0;
1830         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1831                             vm_list_node)
1832                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1833                                     &pd_bo_list_entries[i++]);
1834         /* Add the userptr_inval_list entries to resv_list */
1835         list_for_each_entry(mem, &process_info->userptr_inval_list,
1836                             validate_list.head) {
1837                 list_add_tail(&mem->resv_list.head, &resv_list);
1838                 mem->resv_list.bo = mem->validate_list.bo;
1839                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1840         }
1841
1842         /* Reserve all BOs and page tables for validation */
1843         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1844         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1845         if (ret)
1846                 goto out;
1847
1848         amdgpu_sync_create(&sync);
1849
1850         ret = process_validate_vms(process_info);
1851         if (ret)
1852                 goto unreserve_out;
1853
1854         /* Validate BOs and update GPUVM page tables */
1855         list_for_each_entry_safe(mem, tmp_mem,
1856                                  &process_info->userptr_inval_list,
1857                                  validate_list.head) {
1858                 struct kfd_bo_va_list *bo_va_entry;
1859
1860                 bo = mem->bo;
1861
1862                 /* Copy pages array and validate the BO if we got user pages */
1863                 if (mem->user_pages[0]) {
1864                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1865                                                      mem->user_pages);
1866                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1867                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1868                         if (ret) {
1869                                 pr_err("%s: failed to validate BO\n", __func__);
1870                                 goto unreserve_out;
1871                         }
1872                 }
1873
1874                 /* Validation succeeded: the BO now owns the pages, so free
1875                  * our copy of the pointer array. Put this BO back on
1876                  * the userptr_valid_list. If it needs to be revalidated
1877                  * later, we start from scratch.
1878                  */
1879                 kvfree(mem->user_pages);
1880                 mem->user_pages = NULL;
1881                 list_move_tail(&mem->validate_list.head,
1882                                &process_info->userptr_valid_list);
1883
1884                 /* Update mapping. If the BO was not validated
1885                  * (because we couldn't get user pages), this will
1886                  * clear the page table entries, which will result in
1887                  * VM faults if the GPU tries to access the invalid
1888                  * memory.
1889                  */
1890                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1891                         if (!bo_va_entry->is_mapped)
1892                                 continue;
1893
1894                         ret = update_gpuvm_pte((struct amdgpu_device *)
1895                                                bo_va_entry->kgd_dev,
1896                                                bo_va_entry, &sync);
1897                         if (ret) {
1898                                 pr_err("%s: update PTE failed\n", __func__);
1899                                 /* make sure this gets validated again */
1900                                 atomic_inc(&mem->invalid);
1901                                 goto unreserve_out;
1902                         }
1903                 }
1904         }
1905
1906         /* Update page directories */
1907         ret = process_update_pds(process_info, &sync);
1908
1909 unreserve_out:
1910         ttm_eu_backoff_reservation(&ticket, &resv_list);
1911         amdgpu_sync_wait(&sync, false);
1912         amdgpu_sync_free(&sync);
1913 out:
1914         kfree(pd_bo_list_entries);
1915
1916         return ret;
1917 }
1918
1919 /* Worker callback to restore evicted userptr BOs
1920  *
1921  * Tries to update and validate all userptr BOs. If successful and no
1922  * concurrent evictions happened, the queues are restarted. Otherwise,
1923  * reschedule for another attempt later.
1924  */
1925 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1926 {
1927         struct delayed_work *dwork = to_delayed_work(work);
1928         struct amdkfd_process_info *process_info =
1929                 container_of(dwork, struct amdkfd_process_info,
1930                              restore_userptr_work);
1931         struct task_struct *usertask;
1932         struct mm_struct *mm;
1933         int evicted_bos;
1934
1935         evicted_bos = atomic_read(&process_info->evicted_bos);
1936         if (!evicted_bos)
1937                 return;
1938
1939         /* Reference task and mm in case of concurrent process termination */
1940         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1941         if (!usertask)
1942                 return;
1943         mm = get_task_mm(usertask);
1944         if (!mm) {
1945                 put_task_struct(usertask);
1946                 return;
1947         }
1948
1949         mutex_lock(&process_info->lock);
1950
1951         if (update_invalid_user_pages(process_info, mm))
1952                 goto unlock_out;
1953         /* userptr_inval_list can be empty if all evicted userptr BOs
1954          * have been freed. In that case there is nothing to validate
1955          * and we can just restart the queues.
1956          */
1957         if (!list_empty(&process_info->userptr_inval_list)) {
1958                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1959                         goto unlock_out; /* Concurrent eviction, try again */
1960
1961                 if (validate_invalid_user_pages(process_info))
1962                         goto unlock_out;
1963         }
1964         /* Final check for concurrent eviction and atomic update. If
1965          * another eviction happens after a successful update, it will
1966          * be a first eviction that calls quiesce_mm. The eviction
1967          * reference counting inside KFD will handle this case.
1968          */
1969         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1970             evicted_bos)
1971                 goto unlock_out;
1972         evicted_bos = 0;
1973         if (kgd2kfd_resume_mm(mm)) {
1974                 pr_err("%s: Failed to resume KFD\n", __func__);
1975                 /* No recovery from this failure. Probably the CP is
1976                  * hanging. No point trying again.
1977                  */
1978         }
1979 unlock_out:
1980         mutex_unlock(&process_info->lock);
1981         mmput(mm);
1982         put_task_struct(usertask);
1983
1984         /* If validation failed, reschedule another attempt */
1985         if (evicted_bos)
1986                 schedule_delayed_work(&process_info->restore_userptr_work,
1987                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1988 }
1989
1990 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1991  *   KFD process identified by process_info
1992  *
1993  * @process_info: amdkfd_process_info of the KFD process
1994  *
1995  * After memory eviction, the restore thread calls this function. It should
1996  * be called while the process is still valid. BO restore involves:
1997  *
1998  * 1.  Release old eviction fence and create new one
1999  * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2000  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2001  *     BOs that need to be reserved.
2002  * 4.  Reserve all the BOs
2003  * 5.  Validate PD and PT BOs.
2004  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
2005  * 7.  Add fence to all PD and PT BOs.
2006  * 8.  Unreserve all BOs
2007  */
2008 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2009 {
2010         struct amdgpu_bo_list_entry *pd_bo_list;
2011         struct amdkfd_process_info *process_info = info;
2012         struct amdgpu_vm *peer_vm;
2013         struct kgd_mem *mem;
2014         struct bo_vm_reservation_context ctx;
2015         struct amdgpu_amdkfd_fence *new_fence;
2016         int ret = 0, i;
2017         struct list_head duplicate_save;
2018         struct amdgpu_sync sync_obj;
2019
2020         INIT_LIST_HEAD(&duplicate_save);
2021         INIT_LIST_HEAD(&ctx.list);
2022         INIT_LIST_HEAD(&ctx.duplicates);
2023
2024         pd_bo_list = kcalloc(process_info->n_vms,
2025                              sizeof(struct amdgpu_bo_list_entry),
2026                              GFP_KERNEL);
2027         if (!pd_bo_list)
2028                 return -ENOMEM;
2029
2030         i = 0;
2031         mutex_lock(&process_info->lock);
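        /* Collect the page directory BOs of all VMs belonging to this
         * process so they can be reserved along with the KFD BOs.
         */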
2032         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2033                         vm_list_node)
2034                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2035
2036         /* Reserve all BOs and page tables/directory. Add all BOs from
2037          * kfd_bo_list to ctx.list
2038          */
2039         list_for_each_entry(mem, &process_info->kfd_bo_list,
2040                             validate_list.head) {
2041
2042                 list_add_tail(&mem->resv_list.head, &ctx.list);
2043                 mem->resv_list.bo = mem->validate_list.bo;
2044                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2045         }
2046
2047         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2048                                      false, &duplicate_save);
2049         if (ret) {
2050                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2051                 goto ttm_reserve_fail;
2052         }
2053
2054         amdgpu_sync_create(&sync_obj);
2055
2056         /* Validate PDs and PTs */
2057         ret = process_validate_vms(process_info);
2058         if (ret)
2059                 goto validate_map_fail;
2060
2061         ret = process_sync_pds_resv(process_info, &sync_obj);
2062         if (ret) {
2063                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2064                 goto validate_map_fail;
2065         }
2066
2067         /* Validate BOs and map them to GPUVM (update VM page tables). */
2068         list_for_each_entry(mem, &process_info->kfd_bo_list,
2069                             validate_list.head) {
2070
2071                 struct amdgpu_bo *bo = mem->bo;
2072                 uint32_t domain = mem->domain;
2073                 struct kfd_bo_va_list *bo_va_entry;
2074
2075                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2076                 if (ret) {
2077                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2078                         goto validate_map_fail;
2079                 }
2080                 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2081                 if (ret) {
2082                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2083                         goto validate_map_fail;
2084                 }
2085                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2086                                     bo_list) {
2087                         ret = update_gpuvm_pte((struct amdgpu_device *)
2088                                               bo_va_entry->kgd_dev,
2089                                               bo_va_entry,
2090                                               &sync_obj);
2091                         if (ret) {
2092                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2093                                 goto validate_map_fail;
2094                         }
2095                 }
2096         }
2097
2098         /* Update page directories */
2099         ret = process_update_pds(process_info, &sync_obj);
2100         if (ret) {
2101                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2102                 goto validate_map_fail;
2103         }
2104
2105         /* Wait for validate and PT updates to finish */
2106         amdgpu_sync_wait(&sync_obj, false);
2107
2108         /* Release the old eviction fence and create a new one, because a fence
2109          * only goes from unsignaled to signaled and therefore cannot be reused.
2110          * Use the context and mm from the old fence.
2111          */
2112         new_fence = amdgpu_amdkfd_fence_create(
2113                                 process_info->eviction_fence->base.context,
2114                                 process_info->eviction_fence->mm);
2115         if (!new_fence) {
2116                 pr_err("Failed to create eviction fence\n");
2117                 ret = -ENOMEM;
2118                 goto validate_map_fail;
2119         }
2120         dma_fence_put(&process_info->eviction_fence->base);
2121         process_info->eviction_fence = new_fence;
2122         *ef = dma_fence_get(&new_fence->base);
2123
2124         /* Attach new eviction fence to all BOs */
2125         list_for_each_entry(mem, &process_info->kfd_bo_list,
2126                 validate_list.head)
2127                 amdgpu_bo_fence(mem->bo,
2128                         &process_info->eviction_fence->base, true);
2129
2130         /* Attach eviction fence to PD / PT BOs */
2131         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2132                             vm_list_node) {
2133                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2134
2135                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2136         }
2137
2138 validate_map_fail:
2139         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2140         amdgpu_sync_free(&sync_obj);
2141 ttm_reserve_fail:
2142         mutex_unlock(&process_info->lock);
2143         kfree(pd_bo_list);
2144         return ret;
2145 }