drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/list.h>
26 #include <linux/sched/mm.h>
27 #include <drm/drmP.h>
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31
32 /* Special VM and GART address alignment needed for VI pre-Fiji due to
33  * a HW bug.
34  */
35 #define VI_BO_SIZE_ALIGN (0x8000)
36
37 /* BO flag to indicate a KFD userptr BO */
38 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
39
40 /* Userptr restore delay, just long enough to allow consecutive VM
41  * changes to accumulate
42  */
43 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
44
45 /* Impose limit on how much memory KFD can use */
46 static struct {
47         uint64_t max_system_mem_limit;
48         uint64_t max_userptr_mem_limit;
49         int64_t system_mem_used;
50         int64_t userptr_mem_used;
51         spinlock_t mem_limit_lock;
52 } kfd_mem_limit;
53
54 /* Struct used for amdgpu_amdkfd_bo_validate */
55 struct amdgpu_vm_parser {
56         uint32_t        domain;
57         bool            wait;
58 };
59
60 static const char * const domain_bit_to_string[] = {
61                 "CPU",
62                 "GTT",
63                 "VRAM",
64                 "GDS",
65                 "GWS",
66                 "OA"
67 };
68
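/* Map a single-bit GEM domain flag to its name for log messages,
 * e.g. AMDGPU_GEM_DOMAIN_VRAM (bit 2) resolves to "VRAM".
 */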
69 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
70
71 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
72
73
74 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
75 {
76         return (struct amdgpu_device *)kgd;
77 }
78
79 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
80                 struct kgd_mem *mem)
81 {
82         struct kfd_bo_va_list *entry;
83
84         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
85                 if (entry->bo_va->base.vm == avm)
86                         return false;
87
88         return true;
89 }
90
91 /* Set memory usage limits. Currently, the limits are:
92  *  System (kernel) memory - 3/8 of system RAM
93  *  Userptr memory - 3/4 of system RAM
94  */
95 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
96 {
97         struct sysinfo si;
98         uint64_t mem;
99
100         si_meminfo(&si);
101         mem = si.totalram - si.totalhigh;
102         mem *= si.mem_unit;
103
104         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
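        /* Limits computed with shifts: 3/8 = 1/2 - 1/8 and 3/4 = 1 - 1/4 */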
105         kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
106         kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
107         pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
108                 (kfd_mem_limit.max_system_mem_limit >> 20),
109                 (kfd_mem_limit.max_userptr_mem_limit >> 20));
110 }
111
112 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
113                                               uint64_t size, u32 domain)
114 {
115         size_t acc_size;
116         int ret = 0;
117
118         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
119                                        sizeof(struct amdgpu_bo));
120
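        /* Accounting policy, per the checks below: GTT BOs charge size +
         * acc_size (TTM bookkeeping) against the system limit; userptr BOs
         * (allocated in the CPU domain) charge only acc_size against the
         * system limit and their size against the userptr limit.
         */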
121         spin_lock(&kfd_mem_limit.mem_limit_lock);
122         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
123                 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
124                         kfd_mem_limit.max_system_mem_limit) {
125                         ret = -ENOMEM;
126                         goto err_no_mem;
127                 }
128                 kfd_mem_limit.system_mem_used += (acc_size + size);
129         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
130                 if ((kfd_mem_limit.system_mem_used + acc_size >
131                         kfd_mem_limit.max_system_mem_limit) ||
132                         (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
133                         kfd_mem_limit.max_userptr_mem_limit)) {
134                         ret = -ENOMEM;
135                         goto err_no_mem;
136                 }
137                 kfd_mem_limit.system_mem_used += acc_size;
138                 kfd_mem_limit.userptr_mem_used += size;
139         }
140 err_no_mem:
141         spin_unlock(&kfd_mem_limit.mem_limit_lock);
142         return ret;
143 }
144
145 static void unreserve_system_mem_limit(struct amdgpu_device *adev,
146                                        uint64_t size, u32 domain)
147 {
148         size_t acc_size;
149
150         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
151                                        sizeof(struct amdgpu_bo));
152
153         spin_lock(&kfd_mem_limit.mem_limit_lock);
154         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
155                 kfd_mem_limit.system_mem_used -= (acc_size + size);
156         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
157                 kfd_mem_limit.system_mem_used -= acc_size;
158                 kfd_mem_limit.userptr_mem_used -= size;
159         }
160         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
161                   "kfd system memory accounting unbalanced");
162         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
163                   "kfd userptr memory accounting unbalanced");
164
165         spin_unlock(&kfd_mem_limit.mem_limit_lock);
166 }
167
168 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
169 {
170         spin_lock(&kfd_mem_limit.mem_limit_lock);
171
172         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
173                 kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
174                 kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
175         } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
176                 kfd_mem_limit.system_mem_used -=
177                         (bo->tbo.acc_size + amdgpu_bo_size(bo));
178         }
179         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
180                   "kfd system memory accounting unbalanced");
181         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
182                   "kfd userptr memory accounting unbalanced");
183
184         spin_unlock(&kfd_mem_limit.mem_limit_lock);
185 }
186
187
188 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
189  *  reservation object.
190  *
191  * @bo: [IN] Remove eviction fence(s) from this BO
192  * @ef: [IN] If ef is specified, then this eviction fence is removed if it
193  *  is present in the shared list.
194  * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
195  *  from BO's reservation object shared list.
196  * @ef_count: [OUT] Number of fences in ef_list.
197  *
198  * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
199  *  called to restore the eviction fences and to avoid a memory leak. This is
200  *  useful for shared BOs.
201  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
202  */
203 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
204                                         struct amdgpu_amdkfd_fence *ef,
205                                         struct amdgpu_amdkfd_fence ***ef_list,
206                                         unsigned int *ef_count)
207 {
208         struct reservation_object_list *fobj;
209         struct reservation_object *resv;
210         unsigned int i = 0, j = 0, k = 0, shared_count;
211         unsigned int count = 0;
212         struct amdgpu_amdkfd_fence **fence_list;
213
214         if (!ef && !ef_list)
215                 return -EINVAL;
216
217         if (ef_list) {
218                 *ef_list = NULL;
219                 *ef_count = 0;
220         }
221
222         resv = bo->tbo.resv;
223         fobj = reservation_object_get_list(resv);
224
225         if (!fobj)
226                 return 0;
227
228         preempt_disable();
229         write_seqcount_begin(&resv->seq);
230
231         /* Go through all the shared fences in the reservation object. If
232          * ef is specified and it exists in the list, remove it and reduce the
233          * count. If ef is not specified, then get the count of eviction fences
234          * present.
235          */
236         shared_count = fobj->shared_count;
237         for (i = 0; i < shared_count; ++i) {
238                 struct dma_fence *f;
239
240                 f = rcu_dereference_protected(fobj->shared[i],
241                                               reservation_object_held(resv));
242
243                 if (ef) {
244                         if (f->context == ef->base.context) {
245                                 dma_fence_put(f);
246                                 fobj->shared_count--;
247                         } else {
248                                 RCU_INIT_POINTER(fobj->shared[j++], f);
249                         }
250                 } else if (to_amdgpu_amdkfd_fence(f))
251                         count++;
252         }
253         write_seqcount_end(&resv->seq);
254         preempt_enable();
255
256         if (ef || !count)
257                 return 0;
258
259         /* Allocate memory for 'count' eviction fence pointers. Fill the
260          * ef_list array and ef_count.
261          */
262         fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
263                              GFP_KERNEL);
264         if (!fence_list)
265                 return -ENOMEM;
266
267         preempt_disable();
268         write_seqcount_begin(&resv->seq);
269
270         j = 0;
271         for (i = 0; i < shared_count; ++i) {
272                 struct dma_fence *f;
273                 struct amdgpu_amdkfd_fence *efence;
274
275                 f = rcu_dereference_protected(fobj->shared[i],
276                         reservation_object_held(resv));
277
278                 efence = to_amdgpu_amdkfd_fence(f);
279                 if (efence) {
280                         fence_list[k++] = efence;
281                         fobj->shared_count--;
282                 } else {
283                         RCU_INIT_POINTER(fobj->shared[j++], f);
284                 }
285         }
286
287         write_seqcount_end(&resv->seq);
288         preempt_enable();
289
290         *ef_list = fence_list;
291         *ef_count = k;
292
293         return 0;
294 }
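/* Typical pairing (see amdgpu_amdkfd_bo_validate below): remove the fences
 * with ef_list/ef_count, wait on the BO, then re-add them with
 * amdgpu_amdkfd_add_eviction_fence so the reservation object stays balanced.
 */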
295
296 /* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
297  *  reservation object.
298  *
299  * @bo: [IN] Add eviction fences to this BO
300  * @ef_list: [IN] List of eviction fences to be added
301  * @ef_count: [IN] Number of fences in ef_list.
302  *
303  * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
304  *  function.
305  */
306 static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
307                                 struct amdgpu_amdkfd_fence **ef_list,
308                                 unsigned int ef_count)
309 {
310         int i;
311
312         if (!ef_list || !ef_count)
313                 return;
314
315         for (i = 0; i < ef_count; i++) {
316                 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
317                 /* Re-adding the fence takes an additional reference. Drop that
318                  * reference.
319                  */
320                 dma_fence_put(&ef_list[i]->base);
321         }
322
323         kfree(ef_list);
324 }
325
326 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
327                                      bool wait)
328 {
329         struct ttm_operation_ctx ctx = { false, false };
330         int ret;
331
332         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
333                  "Called with userptr BO"))
334                 return -EINVAL;
335
336         amdgpu_ttm_placement_from_domain(bo, domain);
337
338         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
339         if (ret)
340                 goto validate_fail;
341         if (wait) {
342                 struct amdgpu_amdkfd_fence **ef_list;
343                 unsigned int ef_count;
344
345                 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
346                                                           &ef_count);
347                 if (ret)
348                         goto validate_fail;
349
350                 ttm_bo_wait(&bo->tbo, false, false);
351                 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
352         }
353
354 validate_fail:
355         return ret;
356 }
357
358 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
359 {
360         struct amdgpu_vm_parser *p = param;
361
362         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
363 }
364
365 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
366  *
367  * Page directories are not updated here because huge page handling
368  * during page table updates can invalidate page directory entries
369  * again. Page directories are only updated after updating page
370  * tables.
371  */
372 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
373 {
374         struct amdgpu_bo *pd = vm->root.base.bo;
375         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
376         struct amdgpu_vm_parser param;
377         uint64_t addr, flags = AMDGPU_PTE_VALID;
378         int ret;
379
380         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
381         param.wait = false;
382
383         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
384                                         &param);
385         if (ret) {
386                 pr_err("amdgpu: failed to validate PT BOs\n");
387                 return ret;
388         }
389
390         ret = amdgpu_amdkfd_validate(&param, pd);
391         if (ret) {
392                 pr_err("amdgpu: failed to validate PD\n");
393                 return ret;
394         }
395
396         addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
397         amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
398         vm->pd_phys_addr = addr;
399
400         if (vm->use_cpu_for_update) {
401                 ret = amdgpu_bo_kmap(pd, NULL);
402                 if (ret) {
403                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
404                         return ret;
405                 }
406         }
407
408         return 0;
409 }
410
411 static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
412                          struct dma_fence *f)
413 {
414         int ret = amdgpu_sync_fence(adev, sync, f, false);
415
416         /* Sync objects can't handle multiple GPUs (contexts) updating
417          * sync->last_vm_update. Fortunately we don't need it for
418          * KFD's purposes, so we can just drop that fence.
419          */
420         if (sync->last_vm_update) {
421                 dma_fence_put(sync->last_vm_update);
422                 sync->last_vm_update = NULL;
423         }
424
425         return ret;
426 }
427
428 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
429 {
430         struct amdgpu_bo *pd = vm->root.base.bo;
431         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
432         int ret;
433
434         ret = amdgpu_vm_update_directories(adev, vm);
435         if (ret)
436                 return ret;
437
438         return sync_vm_fence(adev, sync, vm->last_update);
439 }
440
441 /* add_bo_to_vm - Add a BO to a VM
442  *
443  * Everything that needs to be done only once when a BO is first added
444  * to a VM. It can later be mapped and unmapped many times without
445  * repeating these steps.
446  *
447  * 1. Allocate and initialize BO VA entry data structure
448  * 2. Add BO to the VM
449  * 3. Determine ASIC-specific PTE flags
450  * 4. Alloc page tables and directories if needed
451  * 4a.  Validate new page tables and directories
452  */
453 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
454                 struct amdgpu_vm *vm, bool is_aql,
455                 struct kfd_bo_va_list **p_bo_va_entry)
456 {
457         int ret;
458         struct kfd_bo_va_list *bo_va_entry;
459         struct amdgpu_bo *pd = vm->root.base.bo;
460         struct amdgpu_bo *bo = mem->bo;
461         uint64_t va = mem->va;
462         struct list_head *list_bo_va = &mem->bo_va_list;
463         unsigned long bo_size = bo->tbo.mem.size;
464
465         if (!va) {
466                 pr_err("Invalid VA when adding BO to VM\n");
467                 return -EINVAL;
468         }
469
470         if (is_aql)
471                 va += bo_size;
472
473         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
474         if (!bo_va_entry)
475                 return -ENOMEM;
476
477         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
478                         va + bo_size, vm);
479
480         /* Add BO to VM internal data structures */
481         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
482         if (!bo_va_entry->bo_va) {
483                 ret = -EINVAL;
484                 pr_err("Failed to add BO object to VM. ret == %d\n",
485                                 ret);
486                 goto err_vmadd;
487         }
488
489         bo_va_entry->va = va;
490         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
491                                                          mem->mapping_flags);
492         bo_va_entry->kgd_dev = (void *)adev;
493         list_add(&bo_va_entry->bo_list, list_bo_va);
494
495         if (p_bo_va_entry)
496                 *p_bo_va_entry = bo_va_entry;
497
498         /* Allocate new page tables if needed and validate
499          * them. Clearing and validating the new page tables needs to wait
500          * on move fences. We don't want that to trigger the eviction
501          * fence, so remove it temporarily.
502          */
503         amdgpu_amdkfd_remove_eviction_fence(pd,
504                                         vm->process_info->eviction_fence,
505                                         NULL, NULL);
506
507         ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
508         if (ret) {
509                 pr_err("Failed to allocate pts, err=%d\n", ret);
510                 goto err_alloc_pts;
511         }
512
513         ret = vm_validate_pt_pd_bos(vm);
514         if (ret) {
515                 pr_err("validate_pt_pd_bos() failed\n");
516                 goto err_alloc_pts;
517         }
518
519         /* Add the eviction fence back */
520         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
521
522         return 0;
523
524 err_alloc_pts:
525         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
526         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
527         list_del(&bo_va_entry->bo_list);
528 err_vmadd:
529         kfree(bo_va_entry);
530         return ret;
531 }
532
533 static void remove_bo_from_vm(struct amdgpu_device *adev,
534                 struct kfd_bo_va_list *entry, unsigned long size)
535 {
536         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
537                         entry->va,
538                         entry->va + size, entry);
539         amdgpu_vm_bo_rmv(adev, entry->bo_va);
540         list_del(&entry->bo_list);
541         kfree(entry);
542 }
543
544 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
545                                 struct amdkfd_process_info *process_info,
546                                 bool userptr)
547 {
548         struct ttm_validate_buffer *entry = &mem->validate_list;
549         struct amdgpu_bo *bo = mem->bo;
550
551         INIT_LIST_HEAD(&entry->head);
552         entry->shared = true;
553         entry->bo = &bo->tbo;
554         mutex_lock(&process_info->lock);
555         if (userptr)
556                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
557         else
558                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
559         mutex_unlock(&process_info->lock);
560 }
561
562 /* Initializes user pages. It registers the MMU notifier and validates
563  * the userptr BO in the GTT domain.
564  *
565  * The BO must already be on the userptr_valid_list. Otherwise an
566  * eviction and restore may happen that leaves the new BO unmapped
567  * with the user mode queues running.
568  *
569  * Takes the process_info->lock to protect against concurrent restore
570  * workers.
571  *
572  * Returns 0 for success, negative errno for errors.
573  */
574 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
575                            uint64_t user_addr)
576 {
577         struct amdkfd_process_info *process_info = mem->process_info;
578         struct amdgpu_bo *bo = mem->bo;
579         struct ttm_operation_ctx ctx = { true, false };
580         int ret = 0;
581
582         mutex_lock(&process_info->lock);
583
584         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
585         if (ret) {
586                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
587                 goto out;
588         }
589
590         ret = amdgpu_mn_register(bo, user_addr);
591         if (ret) {
592                 pr_err("%s: Failed to register MMU notifier: %d\n",
593                        __func__, ret);
594                 goto out;
595         }
596
597         /* If no restore worker is running concurrently, user_pages
598          * should not be allocated
599          */
600         WARN(mem->user_pages, "Leaking user_pages array");
601
602         mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
603                                            sizeof(struct page *),
604                                            GFP_KERNEL | __GFP_ZERO);
605         if (!mem->user_pages) {
606                 pr_err("%s: Failed to allocate pages array\n", __func__);
607                 ret = -ENOMEM;
608                 goto unregister_out;
609         }
610
611         ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
612         if (ret) {
613                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
614                 goto free_out;
615         }
616
617         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
618
619         ret = amdgpu_bo_reserve(bo, true);
620         if (ret) {
621                 pr_err("%s: Failed to reserve BO\n", __func__);
622                 goto release_out;
623         }
624         amdgpu_ttm_placement_from_domain(bo, mem->domain);
625         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
626         if (ret)
627                 pr_err("%s: failed to validate BO\n", __func__);
628         amdgpu_bo_unreserve(bo);
629
630 release_out:
631         if (ret)
632                 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
633 free_out:
634         kvfree(mem->user_pages);
635         mem->user_pages = NULL;
636 unregister_out:
637         if (ret)
638                 amdgpu_mn_unregister(bo);
639 out:
640         mutex_unlock(&process_info->lock);
641         return ret;
642 }
643
644 /* Reserving a BO and its page table BOs must happen atomically to
645  * avoid deadlocks. Some operations update multiple VMs at once. Track
646  * all the reservation info in a context structure. Optionally a sync
647  * object can track VM updates.
648  */
649 struct bo_vm_reservation_context {
650         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
651         unsigned int n_vms;                 /* Number of VMs reserved       */
652         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
653         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
654         struct list_head list, duplicates;  /* BO lists                     */
655         struct amdgpu_sync *sync;           /* Pointer to sync object       */
656         bool reserved;                      /* Whether BOs are reserved     */
657 };
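/* Typical flow: reserve_bo_and_vm() or reserve_bo_and_cond_vms() fills the
 * context and reserves the BOs, the caller performs its updates against
 * ctx->sync, and unreserve_bo_and_vms() optionally waits on that sync
 * object, backs off the reservation and frees vm_pd.
 */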
658
659 enum bo_vm_match {
660         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
661         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
662         BO_VM_ALL,              /* Match all VMs a BO was added to    */
663 };
664
665 /**
666  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
667  * @mem: KFD BO structure.
668  * @vm: the VM to reserve.
669  * @ctx: the struct that will be used in unreserve_bo_and_vms().
670  */
671 static int reserve_bo_and_vm(struct kgd_mem *mem,
672                               struct amdgpu_vm *vm,
673                               struct bo_vm_reservation_context *ctx)
674 {
675         struct amdgpu_bo *bo = mem->bo;
676         int ret;
677
678         WARN_ON(!vm);
679
680         ctx->reserved = false;
681         ctx->n_vms = 1;
682         ctx->sync = &mem->sync;
683
684         INIT_LIST_HEAD(&ctx->list);
685         INIT_LIST_HEAD(&ctx->duplicates);
686
687         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
688         if (!ctx->vm_pd)
689                 return -ENOMEM;
690
691         ctx->kfd_bo.robj = bo;
692         ctx->kfd_bo.priority = 0;
693         ctx->kfd_bo.tv.bo = &bo->tbo;
694         ctx->kfd_bo.tv.shared = true;
695         ctx->kfd_bo.user_pages = NULL;
696         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
697
698         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
699
700         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
701                                      false, &ctx->duplicates);
702         if (!ret)
703                 ctx->reserved = true;
704         else {
705                 pr_err("Failed to reserve buffers in ttm\n");
706                 kfree(ctx->vm_pd);
707                 ctx->vm_pd = NULL;
708         }
709
710         return ret;
711 }
712
713 /**
714  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
715  * @mem: KFD BO structure.
716  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
717  *  are used. Otherwise, only the given VM is reserved.
718  * @map_type: the mapping status that will be used to filter the VMs.
719  * @ctx: the struct that will be used in unreserve_bo_and_vms().
720  *
721  * Returns 0 for success, negative for failure.
722  */
723 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
724                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
725                                 struct bo_vm_reservation_context *ctx)
726 {
727         struct amdgpu_bo *bo = mem->bo;
728         struct kfd_bo_va_list *entry;
729         unsigned int i;
730         int ret;
731
732         ctx->reserved = false;
733         ctx->n_vms = 0;
734         ctx->vm_pd = NULL;
735         ctx->sync = &mem->sync;
736
737         INIT_LIST_HEAD(&ctx->list);
738         INIT_LIST_HEAD(&ctx->duplicates);
739
740         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
741                 if ((vm && vm != entry->bo_va->base.vm) ||
742                         (entry->is_mapped != map_type
743                         && map_type != BO_VM_ALL))
744                         continue;
745
746                 ctx->n_vms++;
747         }
748
749         if (ctx->n_vms != 0) {
750                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
751                                      GFP_KERNEL);
752                 if (!ctx->vm_pd)
753                         return -ENOMEM;
754         }
755
756         ctx->kfd_bo.robj = bo;
757         ctx->kfd_bo.priority = 0;
758         ctx->kfd_bo.tv.bo = &bo->tbo;
759         ctx->kfd_bo.tv.shared = true;
760         ctx->kfd_bo.user_pages = NULL;
761         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
762
763         i = 0;
764         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
765                 if ((vm && vm != entry->bo_va->base.vm) ||
766                         (entry->is_mapped != map_type
767                         && map_type != BO_VM_ALL))
768                         continue;
769
770                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
771                                 &ctx->vm_pd[i]);
772                 i++;
773         }
774
775         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
776                                      false, &ctx->duplicates);
777         if (!ret)
778                 ctx->reserved = true;
779         else
780                 pr_err("Failed to reserve buffers in ttm.\n");
781
782         if (ret) {
783                 kfree(ctx->vm_pd);
784                 ctx->vm_pd = NULL;
785         }
786
787         return ret;
788 }
789
790 /**
791  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
792  * @ctx: Reservation context to unreserve
793  * @wait: Optionally wait for a sync object representing pending VM updates
794  * @intr: Whether the wait is interruptible
795  *
796  * Also frees any resources allocated in
797  * reserve_bo_and_(cond_)vm(s). Returns the status from
798  * amdgpu_sync_wait.
799  */
800 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
801                                  bool wait, bool intr)
802 {
803         int ret = 0;
804
805         if (wait)
806                 ret = amdgpu_sync_wait(ctx->sync, intr);
807
808         if (ctx->reserved)
809                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
810         kfree(ctx->vm_pd);
811
812         ctx->sync = NULL;
813
814         ctx->reserved = false;
815         ctx->vm_pd = NULL;
816
817         return ret;
818 }
819
820 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
821                                 struct kfd_bo_va_list *entry,
822                                 struct amdgpu_sync *sync)
823 {
824         struct amdgpu_bo_va *bo_va = entry->bo_va;
825         struct amdgpu_vm *vm = bo_va->base.vm;
826         struct amdgpu_bo *pd = vm->root.base.bo;
827
828         /* Remove eviction fence from PD (and thereby from PTs too as
829          * they share the resv. object). Otherwise during PT update
830          * job (see amdgpu_vm_bo_update_mapping), eviction fence would
831          * get added to job->sync object and job execution would
832          * trigger the eviction fence.
833          */
834         amdgpu_amdkfd_remove_eviction_fence(pd,
835                                             vm->process_info->eviction_fence,
836                                             NULL, NULL);
837         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
838
839         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
840
841         /* Add the eviction fence back */
842         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
843
844         sync_vm_fence(adev, sync, bo_va->last_pt_update);
845
846         return 0;
847 }
848
849 static int update_gpuvm_pte(struct amdgpu_device *adev,
850                 struct kfd_bo_va_list *entry,
851                 struct amdgpu_sync *sync)
852 {
853         int ret;
854         struct amdgpu_vm *vm;
855         struct amdgpu_bo_va *bo_va;
856         struct amdgpu_bo *bo;
857
858         bo_va = entry->bo_va;
859         vm = bo_va->base.vm;
860         bo = bo_va->base.bo;
861
862         /* Update the page tables  */
863         ret = amdgpu_vm_bo_update(adev, bo_va, false);
864         if (ret) {
865                 pr_err("amdgpu_vm_bo_update failed\n");
866                 return ret;
867         }
868
869         return sync_vm_fence(adev, sync, bo_va->last_pt_update);
870 }
871
872 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
873                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
874                 bool no_update_pte)
875 {
876         int ret;
877
878         /* Set virtual address for the allocation */
879         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
880                                amdgpu_bo_size(entry->bo_va->base.bo),
881                                entry->pte_flags);
882         if (ret) {
883                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
884                                 entry->va, ret);
885                 return ret;
886         }
887
888         if (no_update_pte)
889                 return 0;
890
891         ret = update_gpuvm_pte(adev, entry, sync);
892         if (ret) {
893                 pr_err("update_gpuvm_pte() failed\n");
894                 goto update_gpuvm_pte_failed;
895         }
896
897         return 0;
898
899 update_gpuvm_pte_failed:
900         unmap_bo_from_gpuvm(adev, entry, sync);
901         return ret;
902 }
903
904 static int process_validate_vms(struct amdkfd_process_info *process_info)
905 {
906         struct amdgpu_vm *peer_vm;
907         int ret;
908
909         list_for_each_entry(peer_vm, &process_info->vm_list_head,
910                             vm_list_node) {
911                 ret = vm_validate_pt_pd_bos(peer_vm);
912                 if (ret)
913                         return ret;
914         }
915
916         return 0;
917 }
918
919 static int process_update_pds(struct amdkfd_process_info *process_info,
920                               struct amdgpu_sync *sync)
921 {
922         struct amdgpu_vm *peer_vm;
923         int ret;
924
925         list_for_each_entry(peer_vm, &process_info->vm_list_head,
926                             vm_list_node) {
927                 ret = vm_update_pds(peer_vm, sync);
928                 if (ret)
929                         return ret;
930         }
931
932         return 0;
933 }
934
935 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
936                        struct dma_fence **ef)
937 {
938         struct amdkfd_process_info *info = NULL;
939         int ret;
940
941         if (!*process_info) {
942                 info = kzalloc(sizeof(*info), GFP_KERNEL);
943                 if (!info)
944                         return -ENOMEM;
945
946                 mutex_init(&info->lock);
947                 INIT_LIST_HEAD(&info->vm_list_head);
948                 INIT_LIST_HEAD(&info->kfd_bo_list);
949                 INIT_LIST_HEAD(&info->userptr_valid_list);
950                 INIT_LIST_HEAD(&info->userptr_inval_list);
951
952                 info->eviction_fence =
953                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
954                                                    current->mm);
955                 if (!info->eviction_fence) {
956                         pr_err("Failed to create eviction fence\n");
957                         ret = -ENOMEM;
958                         goto create_evict_fence_fail;
959                 }
960
961                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
962                 atomic_set(&info->evicted_bos, 0);
963                 INIT_DELAYED_WORK(&info->restore_userptr_work,
964                                   amdgpu_amdkfd_restore_userptr_worker);
965
966                 *process_info = info;
967                 *ef = dma_fence_get(&info->eviction_fence->base);
968         }
969
970         vm->process_info = *process_info;
971
972         /* Validate page directory and attach eviction fence */
973         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
974         if (ret)
975                 goto reserve_pd_fail;
976         ret = vm_validate_pt_pd_bos(vm);
977         if (ret) {
978                 pr_err("validate_pt_pd_bos() failed\n");
979                 goto validate_pd_fail;
980         }
981         ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
982         if (ret)
983                 goto wait_pd_fail;
984         amdgpu_bo_fence(vm->root.base.bo,
985                         &vm->process_info->eviction_fence->base, true);
986         amdgpu_bo_unreserve(vm->root.base.bo);
987
988         /* Update process info */
989         mutex_lock(&vm->process_info->lock);
990         list_add_tail(&vm->vm_list_node,
991                         &(vm->process_info->vm_list_head));
992         vm->process_info->n_vms++;
993         mutex_unlock(&vm->process_info->lock);
994
995         return 0;
996
997 wait_pd_fail:
998 validate_pd_fail:
999         amdgpu_bo_unreserve(vm->root.base.bo);
1000 reserve_pd_fail:
1001         vm->process_info = NULL;
1002         if (info) {
1003                 /* Two fence references: one in info and one in *ef */
1004                 dma_fence_put(&info->eviction_fence->base);
1005                 dma_fence_put(*ef);
1006                 *ef = NULL;
1007                 *process_info = NULL;
1008                 put_pid(info->pid);
1009 create_evict_fence_fail:
1010                 mutex_destroy(&info->lock);
1011                 kfree(info);
1012         }
1013         return ret;
1014 }
1015
1016 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
1017                                           void **process_info,
1018                                           struct dma_fence **ef)
1019 {
1020         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1021         struct amdgpu_vm *new_vm;
1022         int ret;
1023
1024         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1025         if (!new_vm)
1026                 return -ENOMEM;
1027
1028         /* Initialize AMDGPU part of the VM */
1029         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
1030         if (ret) {
1031                 pr_err("Failed init vm ret %d\n", ret);
1032                 goto amdgpu_vm_init_fail;
1033         }
1034
1035         /* Initialize KFD part of the VM and process info */
1036         ret = init_kfd_vm(new_vm, process_info, ef);
1037         if (ret)
1038                 goto init_kfd_vm_fail;
1039
1040         *vm = (void *) new_vm;
1041
1042         return 0;
1043
1044 init_kfd_vm_fail:
1045         amdgpu_vm_fini(adev, new_vm);
1046 amdgpu_vm_init_fail:
1047         kfree(new_vm);
1048         return ret;
1049 }
1050
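/* Unlike amdgpu_amdkfd_gpuvm_create_process_vm() above, this does not create
 * a new VM: it converts the existing VM behind a DRM file descriptor into a
 * compute VM and then initializes the KFD process info for it.
 */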
1051 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1052                                            struct file *filp,
1053                                            void **vm, void **process_info,
1054                                            struct dma_fence **ef)
1055 {
1056         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1057         struct drm_file *drm_priv = filp->private_data;
1058         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1059         struct amdgpu_vm *avm = &drv_priv->vm;
1060         int ret;
1061
1062         /* Already a compute VM? */
1063         if (avm->process_info)
1064                 return -EINVAL;
1065
1066         /* Convert VM into a compute VM */
1067         ret = amdgpu_vm_make_compute(adev, avm);
1068         if (ret)
1069                 return ret;
1070
1071         /* Initialize KFD part of the VM and process info */
1072         ret = init_kfd_vm(avm, process_info, ef);
1073         if (ret)
1074                 return ret;
1075
1076         *vm = (void *)avm;
1077
1078         return 0;
1079 }
1080
1081 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1082                                     struct amdgpu_vm *vm)
1083 {
1084         struct amdkfd_process_info *process_info = vm->process_info;
1085         struct amdgpu_bo *pd = vm->root.base.bo;
1086
1087         if (!process_info)
1088                 return;
1089
1090         /* Release eviction fence from PD */
1091         amdgpu_bo_reserve(pd, false);
1092         amdgpu_bo_fence(pd, NULL, false);
1093         amdgpu_bo_unreserve(pd);
1094
1095         /* Update process info */
1096         mutex_lock(&process_info->lock);
1097         process_info->n_vms--;
1098         list_del(&vm->vm_list_node);
1099         mutex_unlock(&process_info->lock);
1100
1101         /* Release per-process resources when last compute VM is destroyed */
1102         if (!process_info->n_vms) {
1103                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1104                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1105                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1106
1107                 dma_fence_put(&process_info->eviction_fence->base);
1108                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1109                 put_pid(process_info->pid);
1110                 mutex_destroy(&process_info->lock);
1111                 kfree(process_info);
1112         }
1113 }
1114
1115 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1116 {
1117         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1118         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1119
1120         if (WARN_ON(!kgd || !vm))
1121                 return;
1122
1123         pr_debug("Destroying process vm %p\n", vm);
1124
1125         /* Release the VM context */
1126         amdgpu_vm_fini(adev, avm);
1127         kfree(vm);
1128 }
1129
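/* Return the page directory address cached by vm_validate_pt_pd_bos(), in
 * GPU page units; presumably used by KFD to program the process's page
 * table base for its VMID.
 */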
1130 uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1131 {
1132         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1133
1134         return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1135 }
1136
1137 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1138                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1139                 void *vm, struct kgd_mem **mem,
1140                 uint64_t *offset, uint32_t flags)
1141 {
1142         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1143         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1144         uint64_t user_addr = 0;
1145         struct amdgpu_bo *bo;
1146         int byte_align;
1147         u32 domain, alloc_domain;
1148         u64 alloc_flags;
1149         uint32_t mapping_flags;
1150         int ret;
1151
1152         /*
1153          * Check which domain to allocate the BO in
1154          */
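        /* For ALLOC_MEM_FLAGS_USERPTR, *offset carries the user virtual
         * address on input; on success it is overwritten below with the
         * BO's mmap offset.
         */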
1155         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1156                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1157                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1158                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1159                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1160                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1161         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1162                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1163                 alloc_flags = 0;
1164         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1165                 domain = AMDGPU_GEM_DOMAIN_GTT;
1166                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1167                 alloc_flags = 0;
1168                 if (!offset || !*offset)
1169                         return -EINVAL;
1170                 user_addr = *offset;
1171         } else {
1172                 return -EINVAL;
1173         }
1174
1175         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1176         if (!*mem)
1177                 return -ENOMEM;
1178         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1179         mutex_init(&(*mem)->lock);
1180         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1181
1182         /* Workaround for AQL queue wraparound bug. Map the same
1183          * memory twice. That means we only actually allocate half
1184          * the memory.
1185          */
1186         if ((*mem)->aql_queue)
1187                 size = size >> 1;
1188
1189         /* Workaround for TLB bug on older VI chips */
1190         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1191                         adev->asic_type != CHIP_FIJI &&
1192                         adev->asic_type != CHIP_POLARIS10 &&
1193                         adev->asic_type != CHIP_POLARIS11) ?
1194                         VI_BO_SIZE_ALIGN : 1;
1195
1196         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1197         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1198                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1199         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1200                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1201         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1202                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1203         else
1204                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1205         (*mem)->mapping_flags = mapping_flags;
1206
1207         amdgpu_sync_create(&(*mem)->sync);
1208
1209         ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
1210         if (ret) {
1211                 pr_debug("Insufficient system memory\n");
1212                 goto err_reserve_system_mem;
1213         }
1214
1215         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1216                         va, size, domain_string(alloc_domain));
1217
1218         ret = amdgpu_bo_create(adev, size, byte_align,
1219                                 alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
1220         if (ret) {
1221                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1222                                 domain_string(alloc_domain), ret);
1223                 goto err_bo_create;
1224         }
1225         bo->kfd_bo = *mem;
1226         (*mem)->bo = bo;
1227         if (user_addr)
1228                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1229
1230         (*mem)->va = va;
1231         (*mem)->domain = domain;
1232         (*mem)->mapped_to_gpu_memory = 0;
1233         (*mem)->process_info = avm->process_info;
1234         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1235
1236         if (user_addr) {
1237                 ret = init_user_pages(*mem, current->mm, user_addr);
1238                 if (ret) {
1239                         mutex_lock(&avm->process_info->lock);
1240                         list_del(&(*mem)->validate_list.head);
1241                         mutex_unlock(&avm->process_info->lock);
1242                         goto allocate_init_user_pages_failed;
1243                 }
1244         }
1245
1246         if (offset)
1247                 *offset = amdgpu_bo_mmap_offset(bo);
1248
1249         return 0;
1250
1251 allocate_init_user_pages_failed:
1252         amdgpu_bo_unref(&bo);
1253         /* Don't unreserve system mem limit twice */
1254         goto err_reserve_system_mem;
1255 err_bo_create:
1256         unreserve_system_mem_limit(adev, size, alloc_domain);
1257 err_reserve_system_mem:
1258         mutex_destroy(&(*mem)->lock);
1259         kfree(*mem);
1260         return ret;
1261 }
1262
1263 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1264                 struct kgd_dev *kgd, struct kgd_mem *mem)
1265 {
1266         struct amdkfd_process_info *process_info = mem->process_info;
1267         unsigned long bo_size = mem->bo->tbo.mem.size;
1268         struct kfd_bo_va_list *entry, *tmp;
1269         struct bo_vm_reservation_context ctx;
1270         struct ttm_validate_buffer *bo_list_entry;
1271         int ret;
1272
1273         mutex_lock(&mem->lock);
1274
1275         if (mem->mapped_to_gpu_memory > 0) {
1276                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1277                                 mem->va, bo_size);
1278                 mutex_unlock(&mem->lock);
1279                 return -EBUSY;
1280         }
1281
1282         mutex_unlock(&mem->lock);
1283         /* lock is not needed after this, since mem is unused and will
1284          * be freed anyway
1285          */
1286
1287         /* No more MMU notifiers */
1288         amdgpu_mn_unregister(mem->bo);
1289
1290         /* Make sure restore workers don't access the BO any more */
1291         bo_list_entry = &mem->validate_list;
1292         mutex_lock(&process_info->lock);
1293         list_del(&bo_list_entry->head);
1294         mutex_unlock(&process_info->lock);
1295
1296         /* Free user pages if necessary */
1297         if (mem->user_pages) {
1298                 pr_debug("%s: Freeing user_pages array\n", __func__);
1299                 if (mem->user_pages[0])
1300                         release_pages(mem->user_pages,
1301                                         mem->bo->tbo.ttm->num_pages);
1302                 kvfree(mem->user_pages);
1303         }
1304
1305         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1306         if (unlikely(ret))
1307                 return ret;
1308
1309         /* The eviction fence should be removed by the last unmap.
1310          * TODO: Log an error condition if the bo still has the eviction fence
1311          * attached
1312          */
1313         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1314                                         process_info->eviction_fence,
1315                                         NULL, NULL);
1316         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1317                 mem->va + bo_size * (1 + mem->aql_queue));
1318
1319         /* Remove from VM internal data structures */
1320         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1321                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1322                                 entry, bo_size);
1323
1324         ret = unreserve_bo_and_vms(&ctx, false, false);
1325
1326         /* Free the sync object */
1327         amdgpu_sync_free(&mem->sync);
1328
1329         /* Free the BO */
1330         amdgpu_bo_unref(&mem->bo);
1331         mutex_destroy(&mem->lock);
1332         kfree(mem);
1333
1334         return ret;
1335 }
1336
1337 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1338                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1339 {
1340         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1341         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1342         int ret;
1343         struct amdgpu_bo *bo;
1344         uint32_t domain;
1345         struct kfd_bo_va_list *entry;
1346         struct bo_vm_reservation_context ctx;
1347         struct kfd_bo_va_list *bo_va_entry = NULL;
1348         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1349         unsigned long bo_size;
1350         bool is_invalid_userptr = false;
1351
1352         bo = mem->bo;
1353         if (!bo) {
1354                 pr_err("Invalid BO when mapping memory to GPU\n");
1355                 return -EINVAL;
1356         }
1357
1358         /* Make sure restore is not running concurrently. Since we
1359          * don't map invalid userptr BOs, we rely on the next restore
1360          * worker to do the mapping
1361          */
1362         mutex_lock(&mem->process_info->lock);
1363
1364         /* Lock mmap_sem. If we find an invalid userptr BO, we can be
1365          * sure that the MMU notifier is no longer running
1366          * concurrently and the queues are actually stopped
1367          */
1368         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1369                 down_write(&current->mm->mmap_sem);
1370                 is_invalid_userptr = atomic_read(&mem->invalid);
1371                 up_write(&current->mm->mmap_sem);
1372         }
1373
1374         mutex_lock(&mem->lock);
1375
1376         domain = mem->domain;
1377         bo_size = bo->tbo.mem.size;
1378
1379         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1380                         mem->va,
1381                         mem->va + bo_size * (1 + mem->aql_queue),
1382                         vm, domain_string(domain));
1383
1384         ret = reserve_bo_and_vm(mem, vm, &ctx);
1385         if (unlikely(ret))
1386                 goto out;
1387
1388         /* Userptr can be marked as "not invalid", but not actually be
1389          * validated yet (still in the system domain). In that case
1390          * the queues are still stopped and we can leave mapping for
1391          * the next restore worker
1392          */
1393         if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1394                 is_invalid_userptr = true;
1395
1396         if (check_if_add_bo_to_vm(avm, mem)) {
1397                 ret = add_bo_to_vm(adev, mem, avm, false,
1398                                 &bo_va_entry);
1399                 if (ret)
1400                         goto add_bo_to_vm_failed;
1401                 if (mem->aql_queue) {
1402                         ret = add_bo_to_vm(adev, mem, avm,
1403                                         true, &bo_va_entry_aql);
1404                         if (ret)
1405                                 goto add_bo_to_vm_failed_aql;
1406                 }
1407         } else {
1408                 ret = vm_validate_pt_pd_bos(avm);
1409                 if (unlikely(ret))
1410                         goto add_bo_to_vm_failed;
1411         }
1412
1413         if (mem->mapped_to_gpu_memory == 0 &&
1414             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1415                 /* Validate BO only once. The eviction fence gets added to BO
1416                  * the first time it is mapped. Validate will wait for all
1417                  * background evictions to complete.
1418                  */
1419                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1420                 if (ret) {
1421                         pr_debug("Validate failed\n");
1422                         goto map_bo_to_gpuvm_failed;
1423                 }
1424         }
1425
1426         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1427                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1428                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1429                                         entry->va, entry->va + bo_size,
1430                                         entry);
1431
1432                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1433                                               is_invalid_userptr);
1434                         if (ret) {
1435                                 pr_err("Failed to map BO to gpuvm\n");
1436                                 goto map_bo_to_gpuvm_failed;
1437                         }
1438
1439                         ret = vm_update_pds(vm, ctx.sync);
1440                         if (ret) {
1441                                 pr_err("Failed to update page directories\n");
1442                                 goto map_bo_to_gpuvm_failed;
1443                         }
1444
1445                         entry->is_mapped = true;
1446                         mem->mapped_to_gpu_memory++;
1447                         pr_debug("\t INC mapping count %d\n",
1448                                         mem->mapped_to_gpu_memory);
1449                 }
1450         }
1451
1452         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1453                 amdgpu_bo_fence(bo,
1454                                 &avm->process_info->eviction_fence->base,
1455                                 true);
1456         ret = unreserve_bo_and_vms(&ctx, false, false);
1457
1458         goto out;
1459
1460 map_bo_to_gpuvm_failed:
1461         if (bo_va_entry_aql)
1462                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1463 add_bo_to_vm_failed_aql:
1464         if (bo_va_entry)
1465                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1466 add_bo_to_vm_failed:
1467         unreserve_bo_and_vms(&ctx, false, false);
1468 out:
1469         mutex_unlock(&mem->process_info->lock);
1470         mutex_unlock(&mem->lock);
1471         return ret;
1472 }
1473
1474 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1475                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1476 {
1477         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1478         struct amdkfd_process_info *process_info =
1479                 ((struct amdgpu_vm *)vm)->process_info;
1480         unsigned long bo_size = mem->bo->tbo.mem.size;
1481         struct kfd_bo_va_list *entry;
1482         struct bo_vm_reservation_context ctx;
1483         int ret;
1484
1485         mutex_lock(&mem->lock);
1486
1487         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1488         if (unlikely(ret))
1489                 goto out;
1490         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1491         if (ctx.n_vms == 0) {
1492                 ret = -EINVAL;
1493                 goto unreserve_out;
1494         }
1495
1496         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1497         if (unlikely(ret))
1498                 goto unreserve_out;
1499
1500         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1501                 mem->va,
1502                 mem->va + bo_size * (1 + mem->aql_queue),
1503                 vm);
1504
1505         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1506                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1507                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1508                                         entry->va,
1509                                         entry->va + bo_size,
1510                                         entry);
1511
1512                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1513                         if (ret == 0) {
1514                                 entry->is_mapped = false;
1515                         } else {
1516                                 pr_err("Failed to unmap VA 0x%llx\n",
1517                                                 mem->va);
1518                                 goto unreserve_out;
1519                         }
1520
1521                         mem->mapped_to_gpu_memory--;
1522                         pr_debug("\t DEC mapping count %d\n",
1523                                         mem->mapped_to_gpu_memory);
1524                 }
1525         }
1526
1527         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1528          * required.
1529          */
1530         if (mem->mapped_to_gpu_memory == 0 &&
1531             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1532                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1533                                                 process_info->eviction_fence,
1534                                                     NULL, NULL);
1535
1536 unreserve_out:
1537         unreserve_bo_and_vms(&ctx, false, false);
1538 out:
1539         mutex_unlock(&mem->lock);
1540         return ret;
1541 }
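
/*
 * Illustrative sketch, not part of this file's call chain: the mapping
 * count in mem->mapped_to_gpu_memory is kept per (BO, VM) pair, so a BO
 * mapped into several GPUVMs must be unmapped from each of them before
 * its eviction fence is removed. The kgd/mem/vm handles below are
 * placeholders for whatever the KFD caller holds.
 *
 *      r = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *      if (r == -EINVAL)
 *              ;       // the BO was never mapped into this VM
 */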
1542
1543 int amdgpu_amdkfd_gpuvm_sync_memory(
1544                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1545 {
1546         struct amdgpu_sync sync;
1547         int ret;
1548
1549         amdgpu_sync_create(&sync);
1550
1551         mutex_lock(&mem->lock);
1552         amdgpu_sync_clone(&mem->sync, &sync);
1553         mutex_unlock(&mem->lock);
1554
1555         ret = amdgpu_sync_wait(&sync, intr);
1556         amdgpu_sync_free(&sync);
1557         return ret;
1558 }
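
/*
 * Illustrative sketch (assumed caller pattern, not taken from this file):
 * mapping queues its page-table update fences into mem->sync through the
 * reservation context, so the KFD ioctl path is expected to pair the map
 * call with amdgpu_amdkfd_gpuvm_sync_memory() before user queues touch
 * the new mapping. The map function name below is assumed from the rest
 * of this interface.
 *
 *      r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *      if (!r)
 *              r = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 */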
1559
1560 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1561                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1562 {
1563         int ret;
1564         struct amdgpu_bo *bo = mem->bo;
1565
1566         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1567                 pr_err("userptr can't be mapped to kernel\n");
1568                 return -EINVAL;
1569         }
1570
1571         /* Remove this kgd_mem from the kfd_bo_list so the BO is not
1572          * re-validated when BOs are restored after an eviction.
1573          */
1574         mutex_lock(&mem->process_info->lock);
1575
1576         ret = amdgpu_bo_reserve(bo, true);
1577         if (ret) {
1578                 pr_err("Failed to reserve bo. ret %d\n", ret);
1579                 goto bo_reserve_failed;
1580         }
1581
1582         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
1583         if (ret) {
1584                 pr_err("Failed to pin bo. ret %d\n", ret);
1585                 goto pin_failed;
1586         }
1587
1588         ret = amdgpu_bo_kmap(bo, kptr);
1589         if (ret) {
1590                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1591                 goto kmap_failed;
1592         }
1593
1594         amdgpu_amdkfd_remove_eviction_fence(
1595                 bo, mem->process_info->eviction_fence, NULL, NULL);
1596         list_del_init(&mem->validate_list.head);
1597
1598         if (size)
1599                 *size = amdgpu_bo_size(bo);
1600
1601         amdgpu_bo_unreserve(bo);
1602
1603         mutex_unlock(&mem->process_info->lock);
1604         return 0;
1605
1606 kmap_failed:
1607         amdgpu_bo_unpin(bo);
1608 pin_failed:
1609         amdgpu_bo_unreserve(bo);
1610 bo_reserve_failed:
1611         mutex_unlock(&mem->process_info->lock);
1612
1613         return ret;
1614 }
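
/*
 * Illustrative sketch of a hypothetical caller: mapping a GTT BO for
 * direct CPU access in the kernel (e.g. a queue descriptor the driver
 * needs to read or write). The BO is pinned in GTT and taken off the
 * eviction/restore lists above, so the returned kernel pointer stays
 * valid across evictions. "kgd" and "mem" are placeholders.
 *
 *      void *kptr;
 *      uint64_t size;
 *
 *      r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
 *      if (!r)
 *              memset(kptr, 0, size);  // CPU writes through the mapping
 */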
1615
1616 /* Evict a userptr BO by stopping the queues if necessary
1617  *
1618  * Runs in an MMU notifier, possibly in RECLAIM_FS context. This means
1619  * it cannot do any memory allocations and cannot take any locks that
1620  * are held elsewhere while allocating memory. Therefore this is kept
1621  * as simple as possible, using atomic counters.
1622  *
1623  * It doesn't do anything to the BO itself. The real work happens in
1624  * restore, where we get updated page addresses. This function only
1625  * ensures that GPU access to the BO is stopped.
1626  */
1627 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1628                                 struct mm_struct *mm)
1629 {
1630         struct amdkfd_process_info *process_info = mem->process_info;
1631         int invalid, evicted_bos;
1632         int r = 0;
1633
1634         invalid = atomic_inc_return(&mem->invalid);
1635         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1636         if (evicted_bos == 1) {
1637                 /* First eviction, stop the queues */
1638                 r = kgd2kfd->quiesce_mm(mm);
1639                 if (r)
1640                         pr_err("Failed to quiesce KFD\n");
1641                 schedule_delayed_work(&process_info->restore_userptr_work,
1642                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1643         }
1644
1645         return r;
1646 }
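
/*
 * Illustrative sketch of the expected caller context (the exact callback
 * name is an assumption): an MMU notifier invalidation in amdgpu_mn walks
 * the KFD userptr BOs overlapping the invalidated range and calls
 * amdgpu_amdkfd_evict_userptr() for each one. Only atomic counters are
 * touched here, which is what makes this safe in RECLAIM_FS context.
 *
 *      // for each kgd_mem whose userptr range overlaps [start, end):
 *      amdgpu_amdkfd_evict_userptr(mem, mm);
 */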
1647
1648 /* Update invalid userptr BOs
1649  *
1650  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1651  * userptr_inval_list and updates user pages for all BOs that have
1652  * been invalidated since their last update.
1653  */
1654 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1655                                      struct mm_struct *mm)
1656 {
1657         struct kgd_mem *mem, *tmp_mem;
1658         struct amdgpu_bo *bo;
1659         struct ttm_operation_ctx ctx = { false, false };
1660         int invalid, ret;
1661
1662         /* Move all invalidated BOs to the userptr_inval_list and
1663          * release their user pages by migration to the CPU domain
1664          */
1665         list_for_each_entry_safe(mem, tmp_mem,
1666                                  &process_info->userptr_valid_list,
1667                                  validate_list.head) {
1668                 if (!atomic_read(&mem->invalid))
1669                         continue; /* BO is still valid */
1670
1671                 bo = mem->bo;
1672
1673                 if (amdgpu_bo_reserve(bo, true))
1674                         return -EAGAIN;
1675                 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1676                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1677                 amdgpu_bo_unreserve(bo);
1678                 if (ret) {
1679                         pr_err("%s: Failed to invalidate userptr BO\n",
1680                                __func__);
1681                         return -EAGAIN;
1682                 }
1683
1684                 list_move_tail(&mem->validate_list.head,
1685                                &process_info->userptr_inval_list);
1686         }
1687
1688         if (list_empty(&process_info->userptr_inval_list))
1689                 return 0; /* All evicted userptr BOs were freed */
1690
1691         /* Go through userptr_inval_list and update any invalid user_pages */
1692         list_for_each_entry(mem, &process_info->userptr_inval_list,
1693                             validate_list.head) {
1694                 invalid = atomic_read(&mem->invalid);
1695                 if (!invalid)
1696                         /* BO hasn't been invalidated since the last
1697                          * revalidation attempt. Keep its BO list.
1698                          */
1699                         continue;
1700
1701                 bo = mem->bo;
1702
1703                 if (!mem->user_pages) {
1704                         mem->user_pages =
1705                                 kvmalloc_array(bo->tbo.ttm->num_pages,
1706                                                  sizeof(struct page *),
1707                                                  GFP_KERNEL | __GFP_ZERO);
1708                         if (!mem->user_pages) {
1709                                 pr_err("%s: Failed to allocate pages array\n",
1710                                        __func__);
1711                                 return -ENOMEM;
1712                         }
1713                 } else if (mem->user_pages[0]) {
1714                         release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1715                 }
1716
1717                 /* Get updated user pages */
1718                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1719                                                    mem->user_pages);
1720                 if (ret) {
1721                         mem->user_pages[0] = NULL;
1722                         pr_info("%s: Failed to get user pages: %d\n",
1723                                 __func__, ret);
1724                         /* Pretend it succeeded. It will fail later
1725                          * with a VM fault if the GPU tries to access
1726                          * it. Better than hanging indefinitely with
1727                          * stalled user mode queues.
1728                          */
1729                 }
1730
1731                 /* Mark the BO as valid unless it was invalidated
1732                  * again concurrently
1733                  */
1734                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1735                         return -EAGAIN;
1736         }
1737
1738         return 0;
1739 }
1740
1741 /* Validate invalid userptr BOs
1742  *
1743  * Validates BOs on the userptr_inval_list, and moves them back to the
1744  * userptr_valid_list. Also updates GPUVM page tables with new page
1745  * addresses and waits for the page table updates to complete.
1746  */
1747 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1748 {
1749         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1750         struct list_head resv_list, duplicates;
1751         struct ww_acquire_ctx ticket;
1752         struct amdgpu_sync sync;
1753
1754         struct amdgpu_vm *peer_vm;
1755         struct kgd_mem *mem, *tmp_mem;
1756         struct amdgpu_bo *bo;
1757         struct ttm_operation_ctx ctx = { false, false };
1758         int i, ret;
1759
1760         pd_bo_list_entries = kcalloc(process_info->n_vms,
1761                                      sizeof(struct amdgpu_bo_list_entry),
1762                                      GFP_KERNEL);
1763         if (!pd_bo_list_entries) {
1764                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1765                 return -ENOMEM;
1766         }
1767
1768         INIT_LIST_HEAD(&resv_list);
1769         INIT_LIST_HEAD(&duplicates);
1770
1771         /* Get all the page directory BOs that need to be reserved */
1772         i = 0;
1773         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1774                             vm_list_node)
1775                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1776                                     &pd_bo_list_entries[i++]);
1777         /* Add the userptr_inval_list entries to resv_list */
1778         list_for_each_entry(mem, &process_info->userptr_inval_list,
1779                             validate_list.head) {
1780                 list_add_tail(&mem->resv_list.head, &resv_list);
1781                 mem->resv_list.bo = mem->validate_list.bo;
1782                 mem->resv_list.shared = mem->validate_list.shared;
1783         }
1784
1785         /* Reserve all BOs and page tables for validation */
1786         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1787         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1788         if (ret)
1789                 goto out;
1790
1791         amdgpu_sync_create(&sync);
1792
1793         /* Avoid triggering eviction fences when unmapping invalid
1794          * userptr BOs (waits for all fences, doesn't use
1795          * FENCE_OWNER_VM)
1796          */
1797         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1798                             vm_list_node)
1799                 amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1800                                                 process_info->eviction_fence,
1801                                                 NULL, NULL);
1802
1803         ret = process_validate_vms(process_info);
1804         if (ret)
1805                 goto unreserve_out;
1806
1807         /* Validate BOs and update GPUVM page tables */
1808         list_for_each_entry_safe(mem, tmp_mem,
1809                                  &process_info->userptr_inval_list,
1810                                  validate_list.head) {
1811                 struct kfd_bo_va_list *bo_va_entry;
1812
1813                 bo = mem->bo;
1814
1815                 /* Copy pages array and validate the BO if we got user pages */
1816                 if (mem->user_pages[0]) {
1817                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1818                                                      mem->user_pages);
1819                         amdgpu_ttm_placement_from_domain(bo, mem->domain);
1820                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1821                         if (ret) {
1822                                 pr_err("%s: failed to validate BO\n", __func__);
1823                                 goto unreserve_out;
1824                         }
1825                 }
1826
1827                 /* Validation succeeded: the BO now owns the pages, so free
1828                  * our copy of the pointer array. Put this BO back on
1829                  * the userptr_valid_list. If it needs to be revalidated
1830                  * later, we start over from scratch.
1831                  */
1832                 kvfree(mem->user_pages);
1833                 mem->user_pages = NULL;
1834                 list_move_tail(&mem->validate_list.head,
1835                                &process_info->userptr_valid_list);
1836
1837                 /* Update mapping. If the BO was not validated
1838                  * (because we couldn't get user pages), this will
1839                  * clear the page table entries, which will result in
1840                  * VM faults if the GPU tries to access the invalid
1841                  * memory.
1842                  */
1843                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1844                         if (!bo_va_entry->is_mapped)
1845                                 continue;
1846
1847                         ret = update_gpuvm_pte((struct amdgpu_device *)
1848                                                bo_va_entry->kgd_dev,
1849                                                bo_va_entry, &sync);
1850                         if (ret) {
1851                                 pr_err("%s: update PTE failed\n", __func__);
1852                                 /* make sure this gets validated again */
1853                                 atomic_inc(&mem->invalid);
1854                                 goto unreserve_out;
1855                         }
1856                 }
1857         }
1858
1859         /* Update page directories */
1860         ret = process_update_pds(process_info, &sync);
1861
1862 unreserve_out:
1863         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1864                             vm_list_node)
1865                 amdgpu_bo_fence(peer_vm->root.base.bo,
1866                                 &process_info->eviction_fence->base, true);
1867         ttm_eu_backoff_reservation(&ticket, &resv_list);
1868         amdgpu_sync_wait(&sync, false);
1869         amdgpu_sync_free(&sync);
1870 out:
1871         kfree(pd_bo_list_entries);
1872
1873         return ret;
1874 }
1875
1876 /* Worker callback to restore evicted userptr BOs
1877  *
1878  * Tries to update and validate all userptr BOs. If successful and no
1879  * concurrent evictions happened, the queues are restarted. Otherwise,
1880  * reschedule for another attempt later.
1881  */
1882 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1883 {
1884         struct delayed_work *dwork = to_delayed_work(work);
1885         struct amdkfd_process_info *process_info =
1886                 container_of(dwork, struct amdkfd_process_info,
1887                              restore_userptr_work);
1888         struct task_struct *usertask;
1889         struct mm_struct *mm;
1890         int evicted_bos;
1891
1892         evicted_bos = atomic_read(&process_info->evicted_bos);
1893         if (!evicted_bos)
1894                 return;
1895
1896         /* Reference task and mm in case of concurrent process termination */
1897         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1898         if (!usertask)
1899                 return;
1900         mm = get_task_mm(usertask);
1901         if (!mm) {
1902                 put_task_struct(usertask);
1903                 return;
1904         }
1905
1906         mutex_lock(&process_info->lock);
1907
1908         if (update_invalid_user_pages(process_info, mm))
1909                 goto unlock_out;
1910         /* userptr_inval_list can be empty if all evicted userptr BOs
1911          * have been freed. In that case there is nothing to validate
1912          * and we can just restart the queues.
1913          */
1914         if (!list_empty(&process_info->userptr_inval_list)) {
1915                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1916                         goto unlock_out; /* Concurrent eviction, try again */
1917
1918                 if (validate_invalid_user_pages(process_info))
1919                         goto unlock_out;
1920         }
1921         /* Final check for concurrent eviction and atomic update. If
1922          * another eviction happens after successful update, it will
1923          * be a first eviction that calls quiesce_mm. The eviction
1924          * reference counting inside KFD will handle this case.
1925          */
1926         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1927             evicted_bos)
1928                 goto unlock_out;
1929         evicted_bos = 0;
1930         if (kgd2kfd->resume_mm(mm)) {
1931                 pr_err("%s: Failed to resume KFD\n", __func__);
1932                 /* No recovery from this failure. Probably the CP is
1933                  * hanging. No point trying again.
1934                  */
1935         }
1936 unlock_out:
1937         mutex_unlock(&process_info->lock);
1938         mmput(mm);
1939         put_task_struct(usertask);
1940
1941         /* If validation failed, reschedule another attempt */
1942         if (evicted_bos)
1943                 schedule_delayed_work(&process_info->restore_userptr_work,
1944                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1945 }
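
/*
 * Condensed sketch of the eviction counter handshake used above: evictions
 * only ever increment evicted_bos; the restore worker snapshots the
 * counter, revalidates, and then tries to atomically reset it to zero. If
 * any eviction raced in, the cmpxchg fails and the worker reschedules
 * itself instead of resuming the queues.
 *
 *      evicted_bos = atomic_read(&process_info->evicted_bos);
 *      // ... update and validate userptr BOs ...
 *      if (atomic_cmpxchg(&process_info->evicted_bos,
 *                         evicted_bos, 0) == evicted_bos)
 *              kgd2kfd->resume_mm(mm);         // no concurrent eviction
 *      else
 *              schedule_delayed_work(...);     // raced, try again later
 */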
1946
1947 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1948  *   KFD process identified by process_info
1949  *
1950  * @process_info: amdkfd_process_info of the KFD process
1951  *
1952  * After memory eviction, the restore thread calls this function. It must
1953  * be called while the process is still valid. BO restore involves:
1954  *
1955  * 1.  Release the old eviction fence and create a new one
1956  * 2.  Get two copies of the PD BO list from all the VMs. Keep one as pd_list.
1957  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1958  *     BOs that need to be reserved.
1959  * 4.  Reserve all the BOs
1960  * 5.  Validate the PD and PT BOs.
1961  * 6.  Validate all KFD BOs from kfd_bo_list, map them and add the new fence
1962  * 7.  Add the fence to all PD and PT BOs.
1963  * 8.  Unreserve all BOs
1964  */
1965 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1966 {
1967         struct amdgpu_bo_list_entry *pd_bo_list;
1968         struct amdkfd_process_info *process_info = info;
1969         struct amdgpu_vm *peer_vm;
1970         struct kgd_mem *mem;
1971         struct bo_vm_reservation_context ctx;
1972         struct amdgpu_amdkfd_fence *new_fence;
1973         int ret = 0, i;
1974         struct list_head duplicate_save;
1975         struct amdgpu_sync sync_obj;
1976
1977         INIT_LIST_HEAD(&duplicate_save);
1978         INIT_LIST_HEAD(&ctx.list);
1979         INIT_LIST_HEAD(&ctx.duplicates);
1980
1981         pd_bo_list = kcalloc(process_info->n_vms,
1982                              sizeof(struct amdgpu_bo_list_entry),
1983                              GFP_KERNEL);
1984         if (!pd_bo_list)
1985                 return -ENOMEM;
1986
1987         i = 0;
1988         mutex_lock(&process_info->lock);
1989         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1990                         vm_list_node)
1991                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
1992
1993         /* Reserve all BOs and page tables/directory. Add all BOs from
1994          * kfd_bo_list to ctx.list
1995          */
1996         list_for_each_entry(mem, &process_info->kfd_bo_list,
1997                             validate_list.head) {
1998
1999                 list_add_tail(&mem->resv_list.head, &ctx.list);
2000                 mem->resv_list.bo = mem->validate_list.bo;
2001                 mem->resv_list.shared = mem->validate_list.shared;
2002         }
2003
2004         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2005                                      false, &duplicate_save);
2006         if (ret) {
2007                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2008                 goto ttm_reserve_fail;
2009         }
2010
2011         amdgpu_sync_create(&sync_obj);
2012
2013         /* Validate PDs and PTs */
2014         ret = process_validate_vms(process_info);
2015         if (ret)
2016                 goto validate_map_fail;
2017
2018         /* Wait for PD/PTs validate to finish */
2019         /* FIXME: I think this isn't needed */
2020         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2021                             vm_list_node) {
2022                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2023
2024                 ttm_bo_wait(&bo->tbo, false, false);
2025         }
2026
2027         /* Validate BOs and map them to GPUVM (update VM page tables). */
2028         list_for_each_entry(mem, &process_info->kfd_bo_list,
2029                             validate_list.head) {
2030
2031                 struct amdgpu_bo *bo = mem->bo;
2032                 uint32_t domain = mem->domain;
2033                 struct kfd_bo_va_list *bo_va_entry;
2034
2035                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2036                 if (ret) {
2037                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2038                         goto validate_map_fail;
2039                 }
2040
2041                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2042                                     bo_list) {
2043                         ret = update_gpuvm_pte((struct amdgpu_device *)
2044                                               bo_va_entry->kgd_dev,
2045                                               bo_va_entry,
2046                                               &sync_obj);
2047                         if (ret) {
2048                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2049                                 goto validate_map_fail;
2050                         }
2051                 }
2052         }
2053
2054         /* Update page directories */
2055         ret = process_update_pds(process_info, &sync_obj);
2056         if (ret) {
2057                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2058                 goto validate_map_fail;
2059         }
2060
2061         amdgpu_sync_wait(&sync_obj, false);
2062
2063         /* Release the old eviction fence and create a new one: a fence only
2064          * goes from unsignaled to signaled once, so it cannot be reused.
2065          * Use the context and mm from the old fence.
2066          */
2067         new_fence = amdgpu_amdkfd_fence_create(
2068                                 process_info->eviction_fence->base.context,
2069                                 process_info->eviction_fence->mm);
2070         if (!new_fence) {
2071                 pr_err("Failed to create eviction fence\n");
2072                 ret = -ENOMEM;
2073                 goto validate_map_fail;
2074         }
2075         dma_fence_put(&process_info->eviction_fence->base);
2076         process_info->eviction_fence = new_fence;
2077         *ef = dma_fence_get(&new_fence->base);
2078
2079         /* Wait for validate to finish and attach new eviction fence */
2080         list_for_each_entry(mem, &process_info->kfd_bo_list,
2081                 validate_list.head)
2082                 ttm_bo_wait(&mem->bo->tbo, false, false);
2083         list_for_each_entry(mem, &process_info->kfd_bo_list,
2084                 validate_list.head)
2085                 amdgpu_bo_fence(mem->bo,
2086                         &process_info->eviction_fence->base, true);
2087
2088         /* Attach eviction fence to PD / PT BOs */
2089         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2090                             vm_list_node) {
2091                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2092
2093                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2094         }
2095
2096 validate_map_fail:
2097         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2098         amdgpu_sync_free(&sync_obj);
2099 ttm_reserve_fail:
2100         mutex_unlock(&process_info->lock);
2101         kfree(pd_bo_list);
2102         return ret;
2103 }
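
/*
 * Illustrative sketch of a hypothetical caller (simplified; the real KFD
 * restore worker holds additional process state): after an eviction, the
 * process BOs are restored and the new eviction fence returned in *ef
 * replaces the one the caller kept from the previous cycle.
 *
 *      struct dma_fence *ef;
 *
 *      r = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
 *      if (!r) {
 *              dma_fence_put(old_ef);  // drop the previous eviction fence
 *              old_ef = ef;            // keep the new one for the next cycle
 *      }
 */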