drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34
35 /* BO flag to indicate a KFD userptr BO */
36 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
37
38 /* Userptr restore delay, just long enough to allow consecutive VM
39  * changes to accumulate
40  */
41 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
42
43 /* Impose limit on how much memory KFD can use */
44 static struct {
45         uint64_t max_system_mem_limit;
46         uint64_t max_ttm_mem_limit;
47         int64_t system_mem_used;
48         int64_t ttm_mem_used;
49         spinlock_t mem_limit_lock;
50 } kfd_mem_limit;
51
52 /* Struct used for amdgpu_amdkfd_bo_validate */
53 struct amdgpu_vm_parser {
54         uint32_t        domain;
55         bool            wait;
56 };
57
58 static const char * const domain_bit_to_string[] = {
59                 "CPU",
60                 "GTT",
61                 "VRAM",
62                 "GDS",
63                 "GWS",
64                 "OA"
65 };
66
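/* Map a single-bit AMDGPU_GEM_DOMAIN_* flag to its name in the table above */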
67 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
68
69 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
70
71
72 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
73 {
74         return (struct amdgpu_device *)kgd;
75 }
76
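/* Return true if the BO has not been added to this VM yet (no bo_va list
 * entry references it), false if it already has one.
 */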
77 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
78                 struct kgd_mem *mem)
79 {
80         struct kfd_bo_va_list *entry;
81
82         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
83                 if (entry->bo_va->base.vm == avm)
84                         return false;
85
86         return true;
87 }
88
89 /* Set memory usage limits. Currently, the limits are
90  *  System (TTM + userptr) memory - 15/16th System RAM
91  *  TTM memory - 3/8th System RAM
92  */
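/* For example, with roughly 64 GiB of usable system RAM this allows KFD to
 * use up to ~60 GiB of system memory and ~24 GiB of TTM (GTT) memory.
 */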
93 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
94 {
95         struct sysinfo si;
96         uint64_t mem;
97
98         si_meminfo(&si);
99         mem = si.totalram - si.totalhigh;
100         mem *= si.mem_unit;
101
102         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
103         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
104         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
105         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
106                 (kfd_mem_limit.max_system_mem_limit >> 20),
107                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
108 }
109
110 /* Estimate page table size needed to represent a given memory size
111  *
112  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
113  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
114  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
115  * for 2MB pages for TLB efficiency. However, small allocations and
116  * fragmented system memory still need some 4KB pages. We choose a
117  * compromise that should work in most cases without reserving too
118  * much memory for page tables unnecessarily (factor 16K, >> 14).
119  */
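/* For example, ESTIMATE_PT_SIZE of 16 GiB evaluates to 1 MiB of page table
 * memory.
 */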
120 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
121
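/* Check a requested allocation against the KFD memory limits and, if it
 * fits, charge it to the system, TTM and (if applicable) VRAM usage
 * counters. Returns -ENOMEM when any limit would be exceeded.
 */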
122 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
123                 uint64_t size, u32 domain, bool sg)
124 {
125         uint64_t reserved_for_pt =
126                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
127         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
128         int ret = 0;
129
130         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
131                                        sizeof(struct amdgpu_bo));
132
133         vram_needed = 0;
134         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
135                 /* TTM GTT memory */
136                 system_mem_needed = acc_size + size;
137                 ttm_mem_needed = acc_size + size;
138         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
139                 /* Userptr */
140                 system_mem_needed = acc_size + size;
141                 ttm_mem_needed = acc_size;
142         } else {
143                 /* VRAM and SG */
144                 system_mem_needed = acc_size;
145                 ttm_mem_needed = acc_size;
146                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
147                         vram_needed = size;
148         }
149
150         spin_lock(&kfd_mem_limit.mem_limit_lock);
151
152         if (kfd_mem_limit.system_mem_used + system_mem_needed >
153             kfd_mem_limit.max_system_mem_limit)
154                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
155
156         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
157              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
158             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
159              kfd_mem_limit.max_ttm_mem_limit) ||
160             (adev->kfd.vram_used + vram_needed >
161              adev->gmc.real_vram_size - reserved_for_pt)) {
162                 ret = -ENOMEM;
163         } else {
164                 kfd_mem_limit.system_mem_used += system_mem_needed;
165                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
166                 adev->kfd.vram_used += vram_needed;
167         }
168
169         spin_unlock(&kfd_mem_limit.mem_limit_lock);
170         return ret;
171 }
172
173 static void unreserve_mem_limit(struct amdgpu_device *adev,
174                 uint64_t size, u32 domain, bool sg)
175 {
176         size_t acc_size;
177
178         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
179                                        sizeof(struct amdgpu_bo));
180
181         spin_lock(&kfd_mem_limit.mem_limit_lock);
182         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
183                 kfd_mem_limit.system_mem_used -= (acc_size + size);
184                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
185         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
186                 kfd_mem_limit.system_mem_used -= (acc_size + size);
187                 kfd_mem_limit.ttm_mem_used -= acc_size;
188         } else {
189                 kfd_mem_limit.system_mem_used -= acc_size;
190                 kfd_mem_limit.ttm_mem_used -= acc_size;
191                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
192                         adev->kfd.vram_used -= size;
193                         WARN_ONCE(adev->kfd.vram_used < 0,
194                                   "kfd VRAM memory accounting unbalanced");
195                 }
196         }
197         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
198                   "kfd system memory accounting unbalanced");
199         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
200                   "kfd TTM memory accounting unbalanced");
201
202         spin_unlock(&kfd_mem_limit.mem_limit_lock);
203 }
204
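/* Undo the memory-limit accounting for a KFD BO that is being released.
 * The domain and sg arguments are derived from the BO's preferred domains
 * and flags so that the same accounting branch is taken as at allocation.
 */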
205 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
206 {
207         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
208         u32 domain = bo->preferred_domains;
209         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
210
211         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
212                 domain = AMDGPU_GEM_DOMAIN_CPU;
213                 sg = false;
214         }
215
216         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
217 }
218
219
220 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
221  *  reservation object.
222  *
223  * @bo: [IN] Remove eviction fence(s) from this BO
224  * @ef: [IN] This eviction fence is removed if it
225  *  is present in the shared list.
226  *
227  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
228  */
229 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
230                                         struct amdgpu_amdkfd_fence *ef)
231 {
232         struct dma_resv *resv = bo->tbo.base.resv;
233         struct dma_resv_list *old, *new;
234         unsigned int i, j, k;
235
236         if (!ef)
237                 return -EINVAL;
238
239         old = dma_resv_get_list(resv);
240         if (!old)
241                 return 0;
242
243         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
244         if (!new)
245                 return -ENOMEM;
246
247         /* Go through all the shared fences in the reservation object and sort
248          * the interesting ones to the end of the list.
249          */
250         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
251                 struct dma_fence *f;
252
253                 f = rcu_dereference_protected(old->shared[i],
254                                               dma_resv_held(resv));
255
256                 if (f->context == ef->base.context)
257                         RCU_INIT_POINTER(new->shared[--j], f);
258                 else
259                         RCU_INIT_POINTER(new->shared[k++], f);
260         }
261         new->shared_max = old->shared_max;
262         new->shared_count = k;
263
264         /* Install the new fence list, seqcount provides the barriers */
265         write_seqcount_begin(&resv->seq);
266         RCU_INIT_POINTER(resv->fence, new);
267         write_seqcount_end(&resv->seq);
268
269         /* Drop the references to the removed fences */
270         for (i = j, k = 0; i < old->shared_count; ++i) {
271                 struct dma_fence *f;
272
273                 f = rcu_dereference_protected(new->shared[i],
274                                               dma_resv_held(resv));
275                 dma_fence_put(f);
276         }
277         kfree_rcu(old, rcu);
278
279         return 0;
280 }
281
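/* Remove the KFD eviction fence from a page table or page directory BO.
 * The root PD is found by walking the ->parent chain; its vm_bo links back
 * to the VM and thus to the per-process eviction fence.
 */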
282 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
283 {
284         struct amdgpu_bo *root = bo;
285         struct amdgpu_vm_bo_base *vm_bo;
286         struct amdgpu_vm *vm;
287         struct amdkfd_process_info *info;
288         struct amdgpu_amdkfd_fence *ef;
289         int ret;
290
291         /* We can always get vm_bo from the root PD BO. */
292         while (root->parent)
293                 root = root->parent;
294
295         vm_bo = root->vm_bo;
296         if (!vm_bo)
297                 return 0;
298
299         vm = vm_bo->vm;
300         if (!vm)
301                 return 0;
302
303         info = vm->process_info;
304         if (!info || !info->eviction_fence)
305                 return 0;
306
307         ef = container_of(dma_fence_get(&info->eviction_fence->base),
308                         struct amdgpu_amdkfd_fence, base);
309
310         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
311         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
312         dma_resv_unlock(bo->tbo.base.resv);
313
314         dma_fence_put(&ef->base);
315         return ret;
316 }
317
318 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
319                                      bool wait)
320 {
321         struct ttm_operation_ctx ctx = { false, false };
322         int ret;
323
324         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
325                  "Called with userptr BO"))
326                 return -EINVAL;
327
328         amdgpu_bo_placement_from_domain(bo, domain);
329
330         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
331         if (ret)
332                 goto validate_fail;
333         if (wait)
334                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
335
336 validate_fail:
337         return ret;
338 }
339
340 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
341 {
342         struct amdgpu_vm_parser *p = param;
343
344         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
345 }
346
347 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
348  *
349  * Page directories are not updated here because huge page handling
350  * during page table updates can invalidate page directory entries
351  * again. Page directories are only updated after updating page
352  * tables.
353  */
354 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
355 {
356         struct amdgpu_bo *pd = vm->root.base.bo;
357         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
358         struct amdgpu_vm_parser param;
359         int ret;
360
361         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
362         param.wait = false;
363
364         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
365                                         &param);
366         if (ret) {
367                 pr_err("failed to validate PT BOs\n");
368                 return ret;
369         }
370
371         ret = amdgpu_amdkfd_validate(&param, pd);
372         if (ret) {
373                 pr_err("failed to validate PD\n");
374                 return ret;
375         }
376
377         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
378
379         if (vm->use_cpu_for_update) {
380                 ret = amdgpu_bo_kmap(pd, NULL);
381                 if (ret) {
382                         pr_err("failed to kmap PD, ret=%d\n", ret);
383                         return ret;
384                 }
385         }
386
387         return 0;
388 }
389
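/* Update the page directory entries of @vm and add the resulting fence to
 * @sync so that callers can wait for the PD update to complete.
 */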
390 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
391 {
392         struct amdgpu_bo *pd = vm->root.base.bo;
393         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
394         int ret;
395
396         ret = amdgpu_vm_update_pdes(adev, vm, false);
397         if (ret)
398                 return ret;
399
400         return amdgpu_sync_fence(sync, vm->last_update);
401 }
402
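/* Derive the PTE flags for mapping @mem: access permissions taken from the
 * KFD allocation flags plus an ASIC-specific memory type (MTYPE) chosen
 * from the coherence flag and, on some ASICs, whether the BO's VRAM is
 * local to the mapping GPU.
 */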
403 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
404 {
405         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
406         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
407         uint32_t mapping_flags;
408
409         mapping_flags = AMDGPU_VM_PAGE_READABLE;
410         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
411                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
412         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
413                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
414
415         switch (adev->asic_type) {
416         case CHIP_ARCTURUS:
417                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
418                         if (bo_adev == adev)
419                                 mapping_flags |= coherent ?
420                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
421                         else
422                                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
423                 } else {
424                         mapping_flags |= coherent ?
425                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
426                 }
427                 break;
428         default:
429                 mapping_flags |= coherent ?
430                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
431         }
432
433         return amdgpu_gem_va_map_flags(adev, mapping_flags);
434 }
435
436 /* add_bo_to_vm - Add a BO to a VM
437  *
438  * Everything that needs to be done only once when a BO is first added
439  * to a VM. It can later be mapped and unmapped many times without
440  * repeating these steps.
441  *
442  * 1. Allocate and initialize BO VA entry data structure
443  * 2. Add BO to the VM
444  * 3. Determine ASIC-specific PTE flags
445  * 4. Alloc page tables and directories if needed
446  * 4a.  Validate new page tables and directories
447  */
448 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
449                 struct amdgpu_vm *vm, bool is_aql,
450                 struct kfd_bo_va_list **p_bo_va_entry)
451 {
452         int ret;
453         struct kfd_bo_va_list *bo_va_entry;
454         struct amdgpu_bo *bo = mem->bo;
455         uint64_t va = mem->va;
456         struct list_head *list_bo_va = &mem->bo_va_list;
457         unsigned long bo_size = bo->tbo.mem.size;
458
459         if (!va) {
460                 pr_err("Invalid VA when adding BO to VM\n");
461                 return -EINVAL;
462         }
463
464         if (is_aql)
465                 va += bo_size;
466
467         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
468         if (!bo_va_entry)
469                 return -ENOMEM;
470
471         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
472                         va + bo_size, vm);
473
474         /* Add BO to VM internal data structures */
475         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
476         if (!bo_va_entry->bo_va) {
477                 ret = -EINVAL;
478                 pr_err("Failed to add BO object to VM. ret == %d\n",
479                                 ret);
480                 goto err_vmadd;
481         }
482
483         bo_va_entry->va = va;
484         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
485         bo_va_entry->kgd_dev = (void *)adev;
486         list_add(&bo_va_entry->bo_list, list_bo_va);
487
488         if (p_bo_va_entry)
489                 *p_bo_va_entry = bo_va_entry;
490
491         /* Allocate and validate page tables if needed */
492         ret = vm_validate_pt_pd_bos(vm);
493         if (ret) {
494                 pr_err("validate_pt_pd_bos() failed\n");
495                 goto err_alloc_pts;
496         }
497
498         return 0;
499
500 err_alloc_pts:
501         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
502         list_del(&bo_va_entry->bo_list);
503 err_vmadd:
504         kfree(bo_va_entry);
505         return ret;
506 }
507
508 static void remove_bo_from_vm(struct amdgpu_device *adev,
509                 struct kfd_bo_va_list *entry, unsigned long size)
510 {
511         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
512                         entry->va,
513                         entry->va + size, entry);
514         amdgpu_vm_bo_rmv(adev, entry->bo_va);
515         list_del(&entry->bo_list);
516         kfree(entry);
517 }
518
519 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
520                                 struct amdkfd_process_info *process_info,
521                                 bool userptr)
522 {
523         struct ttm_validate_buffer *entry = &mem->validate_list;
524         struct amdgpu_bo *bo = mem->bo;
525
526         INIT_LIST_HEAD(&entry->head);
527         entry->num_shared = 1;
528         entry->bo = &bo->tbo;
529         mutex_lock(&process_info->lock);
530         if (userptr)
531                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
532         else
533                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
534         mutex_unlock(&process_info->lock);
535 }
536
537 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
538                 struct amdkfd_process_info *process_info)
539 {
540         struct ttm_validate_buffer *bo_list_entry;
541
542         bo_list_entry = &mem->validate_list;
543         mutex_lock(&process_info->lock);
544         list_del(&bo_list_entry->head);
545         mutex_unlock(&process_info->lock);
546 }
547
548 /* Initializes user pages. It registers the MMU notifier and validates
549  * the userptr BO in the GTT domain.
550  *
551  * The BO must already be on the userptr_valid_list. Otherwise an
552  * eviction and restore may happen that leaves the new BO unmapped
553  * with the user mode queues running.
554  *
555  * Takes the process_info->lock to protect against concurrent restore
556  * workers.
557  *
558  * Returns 0 for success, negative errno for errors.
559  */
560 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
561 {
562         struct amdkfd_process_info *process_info = mem->process_info;
563         struct amdgpu_bo *bo = mem->bo;
564         struct ttm_operation_ctx ctx = { true, false };
565         int ret = 0;
566
567         mutex_lock(&process_info->lock);
568
569         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
570         if (ret) {
571                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
572                 goto out;
573         }
574
575         ret = amdgpu_mn_register(bo, user_addr);
576         if (ret) {
577                 pr_err("%s: Failed to register MMU notifier: %d\n",
578                        __func__, ret);
579                 goto out;
580         }
581
582         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
583         if (ret) {
584                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
585                 goto unregister_out;
586         }
587
588         ret = amdgpu_bo_reserve(bo, true);
589         if (ret) {
590                 pr_err("%s: Failed to reserve BO\n", __func__);
591                 goto release_out;
592         }
593         amdgpu_bo_placement_from_domain(bo, mem->domain);
594         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
595         if (ret)
596                 pr_err("%s: failed to validate BO\n", __func__);
597         amdgpu_bo_unreserve(bo);
598
599 release_out:
600         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
601 unregister_out:
602         if (ret)
603                 amdgpu_mn_unregister(bo);
604 out:
605         mutex_unlock(&process_info->lock);
606         return ret;
607 }
608
609 /* Reserving a BO and its page table BOs must happen atomically to
610  * avoid deadlocks. Some operations update multiple VMs at once. Track
611  * all the reservation info in a context structure. Optionally a sync
612  * object can track VM updates.
613  */
614 struct bo_vm_reservation_context {
615         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
616         unsigned int n_vms;                 /* Number of VMs reserved       */
617         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
618         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
619         struct list_head list, duplicates;  /* BO lists                     */
620         struct amdgpu_sync *sync;           /* Pointer to sync object       */
621         bool reserved;                      /* Whether BOs are reserved     */
622 };
623
624 enum bo_vm_match {
625         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
626         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
627         BO_VM_ALL,              /* Match all VMs a BO was added to    */
628 };
629
630 /**
631  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
632  * @mem: KFD BO structure.
633  * @vm: the VM to reserve.
634  * @ctx: the struct that will be used in unreserve_bo_and_vms().
635  */
636 static int reserve_bo_and_vm(struct kgd_mem *mem,
637                               struct amdgpu_vm *vm,
638                               struct bo_vm_reservation_context *ctx)
639 {
640         struct amdgpu_bo *bo = mem->bo;
641         int ret;
642
643         WARN_ON(!vm);
644
645         ctx->reserved = false;
646         ctx->n_vms = 1;
647         ctx->sync = &mem->sync;
648
649         INIT_LIST_HEAD(&ctx->list);
650         INIT_LIST_HEAD(&ctx->duplicates);
651
652         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
653         if (!ctx->vm_pd)
654                 return -ENOMEM;
655
656         ctx->kfd_bo.priority = 0;
657         ctx->kfd_bo.tv.bo = &bo->tbo;
658         ctx->kfd_bo.tv.num_shared = 1;
659         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
660
661         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
662
663         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
664                                      false, &ctx->duplicates);
665         if (ret) {
666                 pr_err("Failed to reserve buffers in ttm.\n");
667                 kfree(ctx->vm_pd);
668                 ctx->vm_pd = NULL;
669                 return ret;
670         }
671
672         ctx->reserved = true;
673         return 0;
674 }
675
676 /**
677  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
678  * @mem: KFD BO structure.
679  * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
680  * reserved. Otherwise, only the given VM is reserved.
681  * @map_type: the mapping status that will be used to filter the VMs.
682  * @ctx: the struct that will be used in unreserve_bo_and_vms().
683  *
684  * Returns 0 for success, negative for failure.
685  */
686 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
687                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
688                                 struct bo_vm_reservation_context *ctx)
689 {
690         struct amdgpu_bo *bo = mem->bo;
691         struct kfd_bo_va_list *entry;
692         unsigned int i;
693         int ret;
694
695         ctx->reserved = false;
696         ctx->n_vms = 0;
697         ctx->vm_pd = NULL;
698         ctx->sync = &mem->sync;
699
700         INIT_LIST_HEAD(&ctx->list);
701         INIT_LIST_HEAD(&ctx->duplicates);
702
703         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
704                 if ((vm && vm != entry->bo_va->base.vm) ||
705                         (entry->is_mapped != map_type
706                         && map_type != BO_VM_ALL))
707                         continue;
708
709                 ctx->n_vms++;
710         }
711
712         if (ctx->n_vms != 0) {
713                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
714                                      GFP_KERNEL);
715                 if (!ctx->vm_pd)
716                         return -ENOMEM;
717         }
718
719         ctx->kfd_bo.priority = 0;
720         ctx->kfd_bo.tv.bo = &bo->tbo;
721         ctx->kfd_bo.tv.num_shared = 1;
722         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
723
724         i = 0;
725         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
726                 if ((vm && vm != entry->bo_va->base.vm) ||
727                         (entry->is_mapped != map_type
728                         && map_type != BO_VM_ALL))
729                         continue;
730
731                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
732                                 &ctx->vm_pd[i]);
733                 i++;
734         }
735
736         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
737                                      false, &ctx->duplicates);
738         if (ret) {
739                 pr_err("Failed to reserve buffers in ttm.\n");
740                 kfree(ctx->vm_pd);
741                 ctx->vm_pd = NULL;
742                 return ret;
743         }
744
745         ctx->reserved = true;
746         return 0;
747 }
748
749 /**
750  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
751  * @ctx: Reservation context to unreserve
752  * @wait: Optionally wait for a sync object representing pending VM updates
753  * @intr: Whether the wait is interruptible
754  *
755  * Also frees any resources allocated in
756  * reserve_bo_and_(cond_)vm(s). Returns the status from
757  * amdgpu_sync_wait.
758  */
759 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
760                                  bool wait, bool intr)
761 {
762         int ret = 0;
763
764         if (wait)
765                 ret = amdgpu_sync_wait(ctx->sync, intr);
766
767         if (ctx->reserved)
768                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
769         kfree(ctx->vm_pd);
770
771         ctx->sync = NULL;
772
773         ctx->reserved = false;
774         ctx->vm_pd = NULL;
775
776         return ret;
777 }
778
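/* Remove a mapping of a BO from a VM: unmap the VA range, clear the freed
 * mapping from the page tables and add the resulting fence to @sync.
 */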
779 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
780                                 struct kfd_bo_va_list *entry,
781                                 struct amdgpu_sync *sync)
782 {
783         struct amdgpu_bo_va *bo_va = entry->bo_va;
784         struct amdgpu_vm *vm = bo_va->base.vm;
785
786         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
787
788         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
789
790         amdgpu_sync_fence(sync, bo_va->last_pt_update);
791
792         return 0;
793 }
794
795 static int update_gpuvm_pte(struct amdgpu_device *adev,
796                 struct kfd_bo_va_list *entry,
797                 struct amdgpu_sync *sync)
798 {
799         int ret;
800         struct amdgpu_bo_va *bo_va = entry->bo_va;
801
802         /* Update the page tables */
803         ret = amdgpu_vm_bo_update(adev, bo_va, false);
804         if (ret) {
805                 pr_err("amdgpu_vm_bo_update failed\n");
806                 return ret;
807         }
808
809         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
810 }
811
812 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
813                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
814                 bool no_update_pte)
815 {
816         int ret;
817
818         /* Set virtual address for the allocation */
819         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
820                                amdgpu_bo_size(entry->bo_va->base.bo),
821                                entry->pte_flags);
822         if (ret) {
823                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
824                                 entry->va, ret);
825                 return ret;
826         }
827
828         if (no_update_pte)
829                 return 0;
830
831         ret = update_gpuvm_pte(adev, entry, sync);
832         if (ret) {
833                 pr_err("update_gpuvm_pte() failed\n");
834                 goto update_gpuvm_pte_failed;
835         }
836
837         return 0;
838
839 update_gpuvm_pte_failed:
840         unmap_bo_from_gpuvm(adev, entry, sync);
841         return ret;
842 }
843
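/* Build a single-entry sg_table whose DMA address points at a doorbell or
 * MMIO page; it backs the ttm_bo_type_sg BOs created for such allocations.
 */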
844 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
845 {
846         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
847
848         if (!sg)
849                 return NULL;
850         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
851                 kfree(sg);
852                 return NULL;
853         }
854         sg->sgl->dma_address = addr;
855         sg->sgl->length = size;
856 #ifdef CONFIG_NEED_SG_DMA_LENGTH
857         sg->sgl->dma_length = size;
858 #endif
859         return sg;
860 }
861
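/* Validate the page table and page directory BOs of every VM belonging to
 * the process.
 */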
862 static int process_validate_vms(struct amdkfd_process_info *process_info)
863 {
864         struct amdgpu_vm *peer_vm;
865         int ret;
866
867         list_for_each_entry(peer_vm, &process_info->vm_list_head,
868                             vm_list_node) {
869                 ret = vm_validate_pt_pd_bos(peer_vm);
870                 if (ret)
871                         return ret;
872         }
873
874         return 0;
875 }
876
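/* Add the fences on every per-VM page directory reservation object, except
 * those owned by KFD itself, to @sync.
 */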
877 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
878                                  struct amdgpu_sync *sync)
879 {
880         struct amdgpu_vm *peer_vm;
881         int ret;
882
883         list_for_each_entry(peer_vm, &process_info->vm_list_head,
884                             vm_list_node) {
885                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
886
887                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
888                                        AMDGPU_SYNC_NE_OWNER,
889                                        AMDGPU_FENCE_OWNER_KFD);
890                 if (ret)
891                         return ret;
892         }
893
894         return 0;
895 }
896
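/* Update the page directories of all VMs of the process and track the
 * updates in @sync.
 */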
897 static int process_update_pds(struct amdkfd_process_info *process_info,
898                               struct amdgpu_sync *sync)
899 {
900         struct amdgpu_vm *peer_vm;
901         int ret;
902
903         list_for_each_entry(peer_vm, &process_info->vm_list_head,
904                             vm_list_node) {
905                 ret = vm_update_pds(peer_vm, sync);
906                 if (ret)
907                         return ret;
908         }
909
910         return 0;
911 }
912
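/* Common KFD initialization for a compute VM: create the per-process info
 * and eviction fence on first use, then validate the page directory and
 * attach the eviction fence to it.
 */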
913 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
914                        struct dma_fence **ef)
915 {
916         struct amdkfd_process_info *info = NULL;
917         int ret;
918
919         if (!*process_info) {
920                 info = kzalloc(sizeof(*info), GFP_KERNEL);
921                 if (!info)
922                         return -ENOMEM;
923
924                 mutex_init(&info->lock);
925                 INIT_LIST_HEAD(&info->vm_list_head);
926                 INIT_LIST_HEAD(&info->kfd_bo_list);
927                 INIT_LIST_HEAD(&info->userptr_valid_list);
928                 INIT_LIST_HEAD(&info->userptr_inval_list);
929
930                 info->eviction_fence =
931                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
932                                                    current->mm);
933                 if (!info->eviction_fence) {
934                         pr_err("Failed to create eviction fence\n");
935                         ret = -ENOMEM;
936                         goto create_evict_fence_fail;
937                 }
938
939                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
940                 atomic_set(&info->evicted_bos, 0);
941                 INIT_DELAYED_WORK(&info->restore_userptr_work,
942                                   amdgpu_amdkfd_restore_userptr_worker);
943
944                 *process_info = info;
945                 *ef = dma_fence_get(&info->eviction_fence->base);
946         }
947
948         vm->process_info = *process_info;
949
950         /* Validate page directory and attach eviction fence */
951         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
952         if (ret)
953                 goto reserve_pd_fail;
954         ret = vm_validate_pt_pd_bos(vm);
955         if (ret) {
956                 pr_err("validate_pt_pd_bos() failed\n");
957                 goto validate_pd_fail;
958         }
959         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
960                                   AMDGPU_FENCE_OWNER_KFD, false);
961         if (ret)
962                 goto wait_pd_fail;
963         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
964         if (ret)
965                 goto reserve_shared_fail;
966         amdgpu_bo_fence(vm->root.base.bo,
967                         &vm->process_info->eviction_fence->base, true);
968         amdgpu_bo_unreserve(vm->root.base.bo);
969
970         /* Update process info */
971         mutex_lock(&vm->process_info->lock);
972         list_add_tail(&vm->vm_list_node,
973                         &(vm->process_info->vm_list_head));
974         vm->process_info->n_vms++;
975         mutex_unlock(&vm->process_info->lock);
976
977         return 0;
978
979 reserve_shared_fail:
980 wait_pd_fail:
981 validate_pd_fail:
982         amdgpu_bo_unreserve(vm->root.base.bo);
983 reserve_pd_fail:
984         vm->process_info = NULL;
985         if (info) {
986                 /* Two fence references: one in info and one in *ef */
987                 dma_fence_put(&info->eviction_fence->base);
988                 dma_fence_put(*ef);
989                 *ef = NULL;
990                 *process_info = NULL;
991                 put_pid(info->pid);
992 create_evict_fence_fail:
993                 mutex_destroy(&info->lock);
994                 kfree(info);
995         }
996         return ret;
997 }
998
999 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
1000                                           void **vm, void **process_info,
1001                                           struct dma_fence **ef)
1002 {
1003         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1004         struct amdgpu_vm *new_vm;
1005         int ret;
1006
1007         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1008         if (!new_vm)
1009                 return -ENOMEM;
1010
1011         /* Initialize AMDGPU part of the VM */
1012         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1013         if (ret) {
1014                 pr_err("Failed init vm ret %d\n", ret);
1015                 goto amdgpu_vm_init_fail;
1016         }
1017
1018         /* Initialize KFD part of the VM and process info */
1019         ret = init_kfd_vm(new_vm, process_info, ef);
1020         if (ret)
1021                 goto init_kfd_vm_fail;
1022
1023         *vm = (void *) new_vm;
1024
1025         return 0;
1026
1027 init_kfd_vm_fail:
1028         amdgpu_vm_fini(adev, new_vm);
1029 amdgpu_vm_init_fail:
1030         kfree(new_vm);
1031         return ret;
1032 }
1033
1034 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1035                                            struct file *filp, u32 pasid,
1036                                            void **vm, void **process_info,
1037                                            struct dma_fence **ef)
1038 {
1039         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1040         struct drm_file *drm_priv = filp->private_data;
1041         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1042         struct amdgpu_vm *avm = &drv_priv->vm;
1043         int ret;
1044
1045         /* Already a compute VM? */
1046         if (avm->process_info)
1047                 return -EINVAL;
1048
1049         /* Convert VM into a compute VM */
1050         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1051         if (ret)
1052                 return ret;
1053
1054         /* Initialize KFD part of the VM and process info */
1055         ret = init_kfd_vm(avm, process_info, ef);
1056         if (ret)
1057                 return ret;
1058
1059         *vm = (void *)avm;
1060
1061         return 0;
1062 }
1063
1064 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1065                                     struct amdgpu_vm *vm)
1066 {
1067         struct amdkfd_process_info *process_info = vm->process_info;
1068         struct amdgpu_bo *pd = vm->root.base.bo;
1069
1070         if (!process_info)
1071                 return;
1072
1073         /* Release eviction fence from PD */
1074         amdgpu_bo_reserve(pd, false);
1075         amdgpu_bo_fence(pd, NULL, false);
1076         amdgpu_bo_unreserve(pd);
1077
1078         /* Update process info */
1079         mutex_lock(&process_info->lock);
1080         process_info->n_vms--;
1081         list_del(&vm->vm_list_node);
1082         mutex_unlock(&process_info->lock);
1083
1084         vm->process_info = NULL;
1085
1086         /* Release per-process resources when last compute VM is destroyed */
1087         if (!process_info->n_vms) {
1088                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1089                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1090                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1091
1092                 dma_fence_put(&process_info->eviction_fence->base);
1093                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1094                 put_pid(process_info->pid);
1095                 mutex_destroy(&process_info->lock);
1096                 kfree(process_info);
1097         }
1098 }
1099
1100 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1101 {
1102         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1103         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1104
1105         if (WARN_ON(!kgd || !vm))
1106                 return;
1107
1108         pr_debug("Destroying process vm %p\n", vm);
1109
1110         /* Release the VM context */
1111         amdgpu_vm_fini(adev, avm);
1112         kfree(vm);
1113 }
1114
1115 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1116 {
1117         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1118         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1119
1120         if (WARN_ON(!kgd || !vm))
1121                 return;
1122
1123         pr_debug("Releasing process vm %p\n", vm);
1124
1125         /* The original PASID of the amdgpu VM has already been released
1126          * when the amdgpu VM was converted to a compute VM. The current
1127          * PASID is managed by KFD and will be released on KFD process
1128          * destroy. Set the amdgpu PASID to 0 to avoid a duplicate
1129          * release.
1130          */
1131         amdgpu_vm_release_compute(adev, avm);
1132 }
1133
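/* Return the page directory base in the format the ASIC expects: ASICs
 * older than Vega10 take a page frame number, newer ones the full address.
 */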
1134 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1135 {
1136         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1137         struct amdgpu_bo *pd = avm->root.base.bo;
1138         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1139
1140         if (adev->asic_type < CHIP_VEGA10)
1141                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1142         return avm->pd_phys_addr;
1143 }
1144
1145 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1146                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1147                 void *vm, struct kgd_mem **mem,
1148                 uint64_t *offset, uint32_t flags)
1149 {
1150         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1151         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1152         enum ttm_bo_type bo_type = ttm_bo_type_device;
1153         struct sg_table *sg = NULL;
1154         uint64_t user_addr = 0;
1155         struct amdgpu_bo *bo;
1156         struct drm_gem_object *gobj;
1157         u32 domain, alloc_domain;
1158         u64 alloc_flags;
1159         int ret;
1160
1161         /*
1162          * Check in which domain to allocate the BO
1163          */
1164         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1165                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1166                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1167                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1168                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1169                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1170         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1171                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1172                 alloc_flags = 0;
1173         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1174                 domain = AMDGPU_GEM_DOMAIN_GTT;
1175                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1176                 alloc_flags = 0;
1177                 if (!offset || !*offset)
1178                         return -EINVAL;
1179                 user_addr = untagged_addr(*offset);
1180         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1181                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1182                 domain = AMDGPU_GEM_DOMAIN_GTT;
1183                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1184                 bo_type = ttm_bo_type_sg;
1185                 alloc_flags = 0;
1186                 if (size > UINT_MAX)
1187                         return -EINVAL;
1188                 sg = create_doorbell_sg(*offset, size);
1189                 if (!sg)
1190                         return -ENOMEM;
1191         } else {
1192                 return -EINVAL;
1193         }
1194
1195         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1196         if (!*mem) {
1197                 ret = -ENOMEM;
1198                 goto err;
1199         }
1200         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1201         mutex_init(&(*mem)->lock);
1202         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1203
1204         /* Workaround for AQL queue wraparound bug. Map the same
1205          * memory twice. That means we only actually allocate half
1206          * the memory.
1207          */
1208         if ((*mem)->aql_queue)
1209                 size = size >> 1;
1210
1211         (*mem)->alloc_flags = flags;
1212
1213         amdgpu_sync_create(&(*mem)->sync);
1214
1215         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1216         if (ret) {
1217                 pr_debug("Insufficient memory\n");
1218                 goto err_reserve_limit;
1219         }
1220
1221         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1222                         va, size, domain_string(alloc_domain));
1223
1224         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1225                                        bo_type, NULL, &gobj);
1226         if (ret) {
1227                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1228                          domain_string(alloc_domain), ret);
1229                 goto err_bo_create;
1230         }
1231         bo = gem_to_amdgpu_bo(gobj);
1232         if (bo_type == ttm_bo_type_sg) {
1233                 bo->tbo.sg = sg;
1234                 bo->tbo.ttm->sg = sg;
1235         }
1236         bo->kfd_bo = *mem;
1237         (*mem)->bo = bo;
1238         if (user_addr)
1239                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1240
1241         (*mem)->va = va;
1242         (*mem)->domain = domain;
1243         (*mem)->mapped_to_gpu_memory = 0;
1244         (*mem)->process_info = avm->process_info;
1245         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1246
1247         if (user_addr) {
1248                 ret = init_user_pages(*mem, user_addr);
1249                 if (ret)
1250                         goto allocate_init_user_pages_failed;
1251         }
1252
1253         if (offset)
1254                 *offset = amdgpu_bo_mmap_offset(bo);
1255
1256         return 0;
1257
1258 allocate_init_user_pages_failed:
1259         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1260         amdgpu_bo_unref(&bo);
1261         /* Don't unreserve system mem limit twice */
1262         goto err_reserve_limit;
1263 err_bo_create:
1264         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1265 err_reserve_limit:
1266         mutex_destroy(&(*mem)->lock);
1267         kfree(*mem);
1268 err:
1269         if (sg) {
1270                 sg_free_table(sg);
1271                 kfree(sg);
1272         }
1273         return ret;
1274 }
1275
1276 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1277                 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1278 {
1279         struct amdkfd_process_info *process_info = mem->process_info;
1280         unsigned long bo_size = mem->bo->tbo.mem.size;
1281         struct kfd_bo_va_list *entry, *tmp;
1282         struct bo_vm_reservation_context ctx;
1283         struct ttm_validate_buffer *bo_list_entry;
1284         unsigned int mapped_to_gpu_memory;
1285         int ret;
1286         bool is_imported = false;
1287
1288         mutex_lock(&mem->lock);
1289         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1290         is_imported = mem->is_imported;
1291         mutex_unlock(&mem->lock);
1292         /* lock is not needed after this, since mem is unused and will
1293          * be freed anyway
1294          */
1295
1296         if (mapped_to_gpu_memory > 0) {
1297                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1298                                 mem->va, bo_size);
1299                 return -EBUSY;
1300         }
1301
1302         /* Make sure restore workers don't access the BO any more */
1303         bo_list_entry = &mem->validate_list;
1304         mutex_lock(&process_info->lock);
1305         list_del(&bo_list_entry->head);
1306         mutex_unlock(&process_info->lock);
1307
1308         /* No more MMU notifiers */
1309         amdgpu_mn_unregister(mem->bo);
1310
1311         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1312         if (unlikely(ret))
1313                 return ret;
1314
1315         /* The eviction fence should be removed by the last unmap.
1316          * TODO: Log an error condition if the bo still has the eviction fence
1317          * attached
1318          */
1319         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1320                                         process_info->eviction_fence);
1321         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1322                 mem->va + bo_size * (1 + mem->aql_queue));
1323
1324         /* Remove from VM internal data structures */
1325         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1326                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1327                                 entry, bo_size);
1328
1329         ret = unreserve_bo_and_vms(&ctx, false, false);
1330
1331         /* Free the sync object */
1332         amdgpu_sync_free(&mem->sync);
1333
1334         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1335          * remap BO. We need to free it.
1336          */
1337         if (mem->bo->tbo.sg) {
1338                 sg_free_table(mem->bo->tbo.sg);
1339                 kfree(mem->bo->tbo.sg);
1340         }
1341
1342         /* Update the size of the BO being freed if it was allocated from
1343          * VRAM and is not imported.
1344          */
1345         if (size) {
1346                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1347                     (!is_imported))
1348                         *size = bo_size;
1349                 else
1350                         *size = 0;
1351         }
1352
1353         /* Free the BO */
1354         drm_gem_object_put(&mem->bo->tbo.base);
1355         mutex_destroy(&mem->lock);
1356         kfree(mem);
1357
1358         return ret;
1359 }
1360
1361 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1362                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1363 {
1364         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1365         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1366         int ret;
1367         struct amdgpu_bo *bo;
1368         uint32_t domain;
1369         struct kfd_bo_va_list *entry;
1370         struct bo_vm_reservation_context ctx;
1371         struct kfd_bo_va_list *bo_va_entry = NULL;
1372         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1373         unsigned long bo_size;
1374         bool is_invalid_userptr = false;
1375
1376         bo = mem->bo;
1377         if (!bo) {
1378                 pr_err("Invalid BO when mapping memory to GPU\n");
1379                 return -EINVAL;
1380         }
1381
1382         /* Make sure restore is not running concurrently. Since we
1383          * don't map invalid userptr BOs, we rely on the next restore
1384          * worker to do the mapping
1385          */
1386         mutex_lock(&mem->process_info->lock);
1387
1388         /* Lock the mmap lock. If we find an invalid userptr BO, we can be
1389          * sure that the MMU notifier is no longer running
1390          * concurrently and the queues are actually stopped
1391          */
1392         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1393                 mmap_write_lock(current->mm);
1394                 is_invalid_userptr = atomic_read(&mem->invalid);
1395                 mmap_write_unlock(current->mm);
1396         }
1397
1398         mutex_lock(&mem->lock);
1399
1400         domain = mem->domain;
1401         bo_size = bo->tbo.mem.size;
1402
1403         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1404                         mem->va,
1405                         mem->va + bo_size * (1 + mem->aql_queue),
1406                         vm, domain_string(domain));
1407
1408         ret = reserve_bo_and_vm(mem, vm, &ctx);
1409         if (unlikely(ret))
1410                 goto out;
1411
1412         /* Userptr can be marked as "not invalid", but not actually be
1413          * validated yet (still in the system domain). In that case
1414          * the queues are still stopped and we can leave mapping for
1415          * the next restore worker
1416          */
1417         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1418             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1419                 is_invalid_userptr = true;
1420
1421         if (check_if_add_bo_to_vm(avm, mem)) {
1422                 ret = add_bo_to_vm(adev, mem, avm, false,
1423                                 &bo_va_entry);
1424                 if (ret)
1425                         goto add_bo_to_vm_failed;
1426                 if (mem->aql_queue) {
1427                         ret = add_bo_to_vm(adev, mem, avm,
1428                                         true, &bo_va_entry_aql);
1429                         if (ret)
1430                                 goto add_bo_to_vm_failed_aql;
1431                 }
1432         } else {
1433                 ret = vm_validate_pt_pd_bos(avm);
1434                 if (unlikely(ret))
1435                         goto add_bo_to_vm_failed;
1436         }
1437
1438         if (mem->mapped_to_gpu_memory == 0 &&
1439             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1440                 /* Validate BO only once. The eviction fence gets added to BO
1441                  * the first time it is mapped. Validate will wait for all
1442                  * background evictions to complete.
1443                  */
1444                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1445                 if (ret) {
1446                         pr_debug("Validate failed\n");
1447                         goto map_bo_to_gpuvm_failed;
1448                 }
1449         }
1450
1451         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1452                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1453                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1454                                         entry->va, entry->va + bo_size,
1455                                         entry);
1456
1457                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1458                                               is_invalid_userptr);
1459                         if (ret) {
1460                                 pr_err("Failed to map bo to gpuvm\n");
1461                                 goto map_bo_to_gpuvm_failed;
1462                         }
1463
1464                         ret = vm_update_pds(vm, ctx.sync);
1465                         if (ret) {
1466                                 pr_err("Failed to update page directories\n");
1467                                 goto map_bo_to_gpuvm_failed;
1468                         }
1469
1470                         entry->is_mapped = true;
1471                         mem->mapped_to_gpu_memory++;
1472                         pr_debug("\t INC mapping count %d\n",
1473                                         mem->mapped_to_gpu_memory);
1474                 }
1475         }
1476
1477         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1478                 amdgpu_bo_fence(bo,
1479                                 &avm->process_info->eviction_fence->base,
1480                                 true);
1481         ret = unreserve_bo_and_vms(&ctx, false, false);
1482
1483         goto out;
1484
1485 map_bo_to_gpuvm_failed:
1486         if (bo_va_entry_aql)
1487                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1488 add_bo_to_vm_failed_aql:
1489         if (bo_va_entry)
1490                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1491 add_bo_to_vm_failed:
1492         unreserve_bo_and_vms(&ctx, false, false);
1493 out:
1494         mutex_unlock(&mem->process_info->lock);
1495         mutex_unlock(&mem->lock);
1496         return ret;
1497 }
1498
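/* Unmap a BO from the given GPU VM. The mapping count is decremented and,
 * once the BO is no longer mapped in any VM, the eviction fence is removed
 * so the BO can be evicted again.
 */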
1499 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1500                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1501 {
1502         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1503         struct amdkfd_process_info *process_info =
1504                 ((struct amdgpu_vm *)vm)->process_info;
1505         unsigned long bo_size = mem->bo->tbo.mem.size;
1506         struct kfd_bo_va_list *entry;
1507         struct bo_vm_reservation_context ctx;
1508         int ret;
1509
1510         mutex_lock(&mem->lock);
1511
1512         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1513         if (unlikely(ret))
1514                 goto out;
1515         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1516         if (ctx.n_vms == 0) {
1517                 ret = -EINVAL;
1518                 goto unreserve_out;
1519         }
1520
1521         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1522         if (unlikely(ret))
1523                 goto unreserve_out;
1524
1525         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1526                 mem->va,
1527                 mem->va + bo_size * (1 + mem->aql_queue),
1528                 vm);
1529
1530         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1531                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1532                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1533                                         entry->va,
1534                                         entry->va + bo_size,
1535                                         entry);
1536
1537                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1538                         if (ret == 0) {
1539                                 entry->is_mapped = false;
1540                         } else {
1541                                 pr_err("failed to unmap VA 0x%llx\n",
1542                                                 mem->va);
1543                                 goto unreserve_out;
1544                         }
1545
1546                         mem->mapped_to_gpu_memory--;
1547                         pr_debug("\t DEC mapping count %d\n",
1548                                         mem->mapped_to_gpu_memory);
1549                 }
1550         }
1551
1552         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1553          * required.
1554          */
1555         if (mem->mapped_to_gpu_memory == 0 &&
1556             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1557             !mem->bo->tbo.pin_count)
1558                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1559                                                 process_info->eviction_fence);
1560
1561 unreserve_out:
1562         unreserve_bo_and_vms(&ctx, false, false);
1563 out:
1564         mutex_unlock(&mem->lock);
1565         return ret;
1566 }
1567
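/* Wait for all fences collected in the BO's sync object (page table updates
 * and buffer moves). The sync object is cloned under the memory lock so the
 * wait itself can run without holding it.
 */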
1568 int amdgpu_amdkfd_gpuvm_sync_memory(
1569                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1570 {
1571         struct amdgpu_sync sync;
1572         int ret;
1573
1574         amdgpu_sync_create(&sync);
1575
1576         mutex_lock(&mem->lock);
1577         amdgpu_sync_clone(&mem->sync, &sync);
1578         mutex_unlock(&mem->lock);
1579
1580         ret = amdgpu_sync_wait(&sync, intr);
1581         amdgpu_sync_free(&sync);
1582         return ret;
1583 }
1584
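/* Pin a GTT BO and map it into the kernel address space for CPU access.
 * While pinned, the BO is taken out of the KFD eviction/restore mechanism
 * (eviction fence removed, BO dropped from the validate list).
 */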
1585 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1586                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1587 {
1588         int ret;
1589         struct amdgpu_bo *bo = mem->bo;
1590
1591         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1592                 pr_err("userptr can't be mapped to kernel\n");
1593                 return -EINVAL;
1594         }
1595
1596         /* Delete kgd_mem from kfd_bo_list to avoid re-validating
1597          * this BO when it is restored after an eviction.
1598          */
1599         mutex_lock(&mem->process_info->lock);
1600
1601         ret = amdgpu_bo_reserve(bo, true);
1602         if (ret) {
1603                 pr_err("Failed to reserve bo. ret %d\n", ret);
1604                 goto bo_reserve_failed;
1605         }
1606
1607         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1608         if (ret) {
1609                 pr_err("Failed to pin bo. ret %d\n", ret);
1610                 goto pin_failed;
1611         }
1612
1613         ret = amdgpu_bo_kmap(bo, kptr);
1614         if (ret) {
1615                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1616                 goto kmap_failed;
1617         }
1618
1619         amdgpu_amdkfd_remove_eviction_fence(
1620                 bo, mem->process_info->eviction_fence);
1621         list_del_init(&mem->validate_list.head);
1622
1623         if (size)
1624                 *size = amdgpu_bo_size(bo);
1625
1626         amdgpu_bo_unreserve(bo);
1627
1628         mutex_unlock(&mem->process_info->lock);
1629         return 0;
1630
1631 kmap_failed:
1632         amdgpu_bo_unpin(bo);
1633 pin_failed:
1634         amdgpu_bo_unreserve(bo);
1635 bo_reserve_failed:
1636         mutex_unlock(&mem->process_info->lock);
1637
1638         return ret;
1639 }
1640
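/* Copy the latest VM fault information for KFD and clear the updated flag */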
1641 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1642                                               struct kfd_vm_fault_info *mem)
1643 {
1644         struct amdgpu_device *adev;
1645
1646         adev = (struct amdgpu_device *)kgd;
1647         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1648                 *mem = *adev->gmc.vm_fault_info;
1649                 mb();
1650                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1651         }
1652         return 0;
1653 }
1654
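/* Import a DMA-buf that was exported by the same amdgpu device and wrap it
 * in a kgd_mem so KFD can map it. Only VRAM and GTT BOs from this device
 * are accepted; the underlying GEM object gets an extra reference.
 */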
1655 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1656                                       struct dma_buf *dma_buf,
1657                                       uint64_t va, void *vm,
1658                                       struct kgd_mem **mem, uint64_t *size,
1659                                       uint64_t *mmap_offset)
1660 {
1661         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1662         struct drm_gem_object *obj;
1663         struct amdgpu_bo *bo;
1664         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1665
1666         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1667                 /* Can't handle non-graphics buffers */
1668                 return -EINVAL;
1669
1670         obj = dma_buf->priv;
1671         if (drm_to_adev(obj->dev) != adev)
1672                 /* Can't handle buffers from other devices */
1673                 return -EINVAL;
1674
1675         bo = gem_to_amdgpu_bo(obj);
1676         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1677                                     AMDGPU_GEM_DOMAIN_GTT)))
1678                 /* Only VRAM and GTT BOs are supported */
1679                 return -EINVAL;
1680
1681         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1682         if (!*mem)
1683                 return -ENOMEM;
1684
1685         if (size)
1686                 *size = amdgpu_bo_size(bo);
1687
1688         if (mmap_offset)
1689                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1690
1691         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1692         mutex_init(&(*mem)->lock);
1693
1694         (*mem)->alloc_flags =
1695                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1696                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1697                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1698                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1699
1700         drm_gem_object_get(&bo->tbo.base);
1701         (*mem)->bo = bo;
1702         (*mem)->va = va;
1703         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1704                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1705         (*mem)->mapped_to_gpu_memory = 0;
1706         (*mem)->process_info = avm->process_info;
1707         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1708         amdgpu_sync_create(&(*mem)->sync);
1709         (*mem)->is_imported = true;
1710
1711         return 0;
1712 }
1713
1714 /* Evict a userptr BO by stopping the queues if necessary
1715  *
1716  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1717  * cannot do any memory allocations, and cannot take any locks that
1718  * are held elsewhere while allocating memory. Therefore this is as
1719  * simple as possible, using atomic counters.
1720  *
1721  * It doesn't do anything to the BO itself. The real work happens in
1722  * restore, where we get updated page addresses. This function only
1723  * ensures that GPU access to the BO is stopped.
1724  */
1725 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1726                                 struct mm_struct *mm)
1727 {
1728         struct amdkfd_process_info *process_info = mem->process_info;
1729         int evicted_bos;
1730         int r = 0;
1731
1732         atomic_inc(&mem->invalid);
1733         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1734         if (evicted_bos == 1) {
1735                 /* First eviction, stop the queues */
1736                 r = kgd2kfd_quiesce_mm(mm);
1737                 if (r)
1738                         pr_err("Failed to quiesce KFD\n");
1739                 schedule_delayed_work(&process_info->restore_userptr_work,
1740                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1741         }
1742
1743         return r;
1744 }
1745
1746 /* Update invalid userptr BOs
1747  *
1748  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1749  * userptr_inval_list and updates user pages for all BOs that have
1750  * been invalidated since their last update.
1751  */
1752 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1753                                      struct mm_struct *mm)
1754 {
1755         struct kgd_mem *mem, *tmp_mem;
1756         struct amdgpu_bo *bo;
1757         struct ttm_operation_ctx ctx = { false, false };
1758         int invalid, ret;
1759
1760         /* Move all invalidated BOs to the userptr_inval_list and
1761          * release their user pages by migration to the CPU domain
1762          */
1763         list_for_each_entry_safe(mem, tmp_mem,
1764                                  &process_info->userptr_valid_list,
1765                                  validate_list.head) {
1766                 if (!atomic_read(&mem->invalid))
1767                         continue; /* BO is still valid */
1768
1769                 bo = mem->bo;
1770
1771                 if (amdgpu_bo_reserve(bo, true))
1772                         return -EAGAIN;
1773                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1774                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1775                 amdgpu_bo_unreserve(bo);
1776                 if (ret) {
1777                         pr_err("%s: Failed to invalidate userptr BO\n",
1778                                __func__);
1779                         return -EAGAIN;
1780                 }
1781
1782                 list_move_tail(&mem->validate_list.head,
1783                                &process_info->userptr_inval_list);
1784         }
1785
1786         if (list_empty(&process_info->userptr_inval_list))
1787                 return 0; /* All evicted userptr BOs were freed */
1788
1789         /* Go through userptr_inval_list and update any invalid user_pages */
1790         list_for_each_entry(mem, &process_info->userptr_inval_list,
1791                             validate_list.head) {
1792                 invalid = atomic_read(&mem->invalid);
1793                 if (!invalid)
1794                         /* BO hasn't been invalidated since the last
1795                          * revalidation attempt. Keep its BO list.
1796                          */
1797                         continue;
1798
1799                 bo = mem->bo;
1800
1801                 /* Get updated user pages */
1802                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1803                 if (ret) {
1804                         pr_debug("%s: Failed to get user pages: %d\n",
1805                                 __func__, ret);
1806
1807                         /* Return error -EBUSY or -ENOMEM, retry restore */
1808                         return ret;
1809                 }
1810
1811                 /*
1812                  * FIXME: Cannot ignore the return code, must hold
1813                  * notifier_lock
1814                  */
1815                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1816
1817                 /* Mark the BO as valid unless it was invalidated
1818                  * again concurrently.
1819                  */
1820                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1821                         return -EAGAIN;
1822         }
1823
1824         return 0;
1825 }
1826
1827 /* Validate invalid userptr BOs
1828  *
1829  * Validates BOs on the userptr_inval_list, and moves them back to the
1830  * userptr_valid_list. Also updates GPUVM page tables with new page
1831  * addresses and waits for the page table updates to complete.
1832  */
1833 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1834 {
1835         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1836         struct list_head resv_list, duplicates;
1837         struct ww_acquire_ctx ticket;
1838         struct amdgpu_sync sync;
1839
1840         struct amdgpu_vm *peer_vm;
1841         struct kgd_mem *mem, *tmp_mem;
1842         struct amdgpu_bo *bo;
1843         struct ttm_operation_ctx ctx = { false, false };
1844         int i, ret;
1845
1846         pd_bo_list_entries = kcalloc(process_info->n_vms,
1847                                      sizeof(struct amdgpu_bo_list_entry),
1848                                      GFP_KERNEL);
1849         if (!pd_bo_list_entries) {
1850                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1851                 ret = -ENOMEM;
1852                 goto out_no_mem;
1853         }
1854
1855         INIT_LIST_HEAD(&resv_list);
1856         INIT_LIST_HEAD(&duplicates);
1857
1858         /* Get all the page directory BOs that need to be reserved */
1859         i = 0;
1860         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1861                             vm_list_node)
1862                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1863                                     &pd_bo_list_entries[i++]);
1864         /* Add the userptr_inval_list entries to resv_list */
1865         list_for_each_entry(mem, &process_info->userptr_inval_list,
1866                             validate_list.head) {
1867                 list_add_tail(&mem->resv_list.head, &resv_list);
1868                 mem->resv_list.bo = mem->validate_list.bo;
1869                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1870         }
1871
1872         /* Reserve all BOs and page tables for validation */
1873         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1874         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1875         if (ret)
1876                 goto out_free;
1877
1878         amdgpu_sync_create(&sync);
1879
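        /* Make sure the page directory/table BOs of all VMs in the
         * process are valid before page table entries are updated.
         */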
1880         ret = process_validate_vms(process_info);
1881         if (ret)
1882                 goto unreserve_out;
1883
1884         /* Validate BOs and update GPUVM page tables */
1885         list_for_each_entry_safe(mem, tmp_mem,
1886                                  &process_info->userptr_inval_list,
1887                                  validate_list.head) {
1888                 struct kfd_bo_va_list *bo_va_entry;
1889
1890                 bo = mem->bo;
1891
1892                 /* Validate the BO if we got user pages */
1893                 if (bo->tbo.ttm->pages[0]) {
1894                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1895                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1896                         if (ret) {
1897                                 pr_err("%s: failed to validate BO\n", __func__);
1898                                 goto unreserve_out;
1899                         }
1900                 }
1901
1902                 list_move_tail(&mem->validate_list.head,
1903                                &process_info->userptr_valid_list);
1904
1905                 /* Update mapping. If the BO was not validated
1906                  * (because we couldn't get user pages), this will
1907                  * clear the page table entries, which will result in
1908                  * VM faults if the GPU tries to access the invalid
1909                  * memory.
1910                  */
1911                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1912                         if (!bo_va_entry->is_mapped)
1913                                 continue;
1914
1915                         ret = update_gpuvm_pte((struct amdgpu_device *)
1916                                                bo_va_entry->kgd_dev,
1917                                                bo_va_entry, &sync);
1918                         if (ret) {
1919                                 pr_err("%s: update PTE failed\n", __func__);
1920                                 /* make sure this gets validated again */
1921                                 atomic_inc(&mem->invalid);
1922                                 goto unreserve_out;
1923                         }
1924                 }
1925         }
1926
1927         /* Update page directories */
1928         ret = process_update_pds(process_info, &sync);
1929
1930 unreserve_out:
1931         ttm_eu_backoff_reservation(&ticket, &resv_list);
1932         amdgpu_sync_wait(&sync, false);
1933         amdgpu_sync_free(&sync);
1934 out_free:
1935         kfree(pd_bo_list_entries);
1936 out_no_mem:
1937
1938         return ret;
1939 }
1940
1941 /* Worker callback to restore evicted userptr BOs
1942  *
1943  * Tries to update and validate all userptr BOs. If successful and no
1944  * concurrent evictions happened, the queues are restarted. Otherwise,
1945  * reschedule for another attempt later.
1946  */
1947 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1948 {
1949         struct delayed_work *dwork = to_delayed_work(work);
1950         struct amdkfd_process_info *process_info =
1951                 container_of(dwork, struct amdkfd_process_info,
1952                              restore_userptr_work);
1953         struct task_struct *usertask;
1954         struct mm_struct *mm;
1955         int evicted_bos;
1956
1957         evicted_bos = atomic_read(&process_info->evicted_bos);
1958         if (!evicted_bos)
1959                 return;
1960
1961         /* Reference task and mm in case of concurrent process termination */
1962         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1963         if (!usertask)
1964                 return;
1965         mm = get_task_mm(usertask);
1966         if (!mm) {
1967                 put_task_struct(usertask);
1968                 return;
1969         }
1970
1971         mutex_lock(&process_info->lock);
1972
1973         if (update_invalid_user_pages(process_info, mm))
1974                 goto unlock_out;
1975         /* userptr_inval_list can be empty if all evicted userptr BOs
1976          * have been freed. In that case there is nothing to validate
1977          * and we can just restart the queues.
1978          */
1979         if (!list_empty(&process_info->userptr_inval_list)) {
1980                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1981                         goto unlock_out; /* Concurrent eviction, try again */
1982
1983                 if (validate_invalid_user_pages(process_info))
1984                         goto unlock_out;
1985         }
1986         /* Final check for concurrent eviction and atomic update. If
1987          * another eviction happens after successful update, it will
1988          * be a first eviction that calls quiesce_mm. The eviction
1989          * reference counting inside KFD will handle this case.
1990          */
1991         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1992             evicted_bos)
1993                 goto unlock_out;
1994         evicted_bos = 0;
1995         if (kgd2kfd_resume_mm(mm)) {
1996                 pr_err("%s: Failed to resume KFD\n", __func__);
1997                 /* No recovery from this failure. Probably the CP is
1998                  * hanging. No point trying again.
1999                  */
2000         }
2001
2002 unlock_out:
2003         mutex_unlock(&process_info->lock);
2004         mmput(mm);
2005         put_task_struct(usertask);
2006
2007         /* If validation failed, reschedule another attempt */
2008         if (evicted_bos)
2009                 schedule_delayed_work(&process_info->restore_userptr_work,
2010                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2011 }
2012
2013 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2014  *   KFD process identified by process_info
2015  *
2016  * @process_info: amdkfd_process_info of the KFD process
2017  *
2018  * After memory eviction, the restore thread calls this function. The function
2019  * should be called while the process is still valid. BO restore involves:
2020  *
2021  * 1.  Release the old eviction fence and create a new one
2022  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2023  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2024  *     BOs that need to be reserved.
2025  * 4.  Reserve all the BOs
2026  * 5.  Validate the PD and PT BOs.
2027  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2028  * 7.  Add fence to all PD and PT BOs.
2029  * 8.  Unreserve all BOs
2030  */
2031 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2032 {
2033         struct amdgpu_bo_list_entry *pd_bo_list;
2034         struct amdkfd_process_info *process_info = info;
2035         struct amdgpu_vm *peer_vm;
2036         struct kgd_mem *mem;
2037         struct bo_vm_reservation_context ctx;
2038         struct amdgpu_amdkfd_fence *new_fence;
2039         int ret = 0, i;
2040         struct list_head duplicate_save;
2041         struct amdgpu_sync sync_obj;
2042         unsigned long failed_size = 0;
2043         unsigned long total_size = 0;
2044
2045         INIT_LIST_HEAD(&duplicate_save);
2046         INIT_LIST_HEAD(&ctx.list);
2047         INIT_LIST_HEAD(&ctx.duplicates);
2048
2049         pd_bo_list = kcalloc(process_info->n_vms,
2050                              sizeof(struct amdgpu_bo_list_entry),
2051                              GFP_KERNEL);
2052         if (!pd_bo_list)
2053                 return -ENOMEM;
2054
2055         i = 0;
2056         mutex_lock(&process_info->lock);
2057         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2058                         vm_list_node)
2059                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2060
2061         /* Reserve all BOs and page tables/directory. Add all BOs from
2062          * kfd_bo_list to ctx.list
2063          */
2064         list_for_each_entry(mem, &process_info->kfd_bo_list,
2065                             validate_list.head) {
2066
2067                 list_add_tail(&mem->resv_list.head, &ctx.list);
2068                 mem->resv_list.bo = mem->validate_list.bo;
2069                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2070         }
2071
2072         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2073                                      false, &duplicate_save);
2074         if (ret) {
2075                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2076                 goto ttm_reserve_fail;
2077         }
2078
2079         amdgpu_sync_create(&sync_obj);
2080
2081         /* Validate PDs and PTs */
2082         ret = process_validate_vms(process_info);
2083         if (ret)
2084                 goto validate_map_fail;
2085
2086         ret = process_sync_pds_resv(process_info, &sync_obj);
2087         if (ret) {
2088                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2089                 goto validate_map_fail;
2090         }
2091
2092         /* Validate BOs and map them to GPUVM (update VM page tables). */
2093         list_for_each_entry(mem, &process_info->kfd_bo_list,
2094                             validate_list.head) {
2095
2096                 struct amdgpu_bo *bo = mem->bo;
2097                 uint32_t domain = mem->domain;
2098                 struct kfd_bo_va_list *bo_va_entry;
2099
2100                 total_size += amdgpu_bo_size(bo);
2101
2102                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2103                 if (ret) {
2104                         pr_debug("Memory eviction: Validate BOs failed\n");
2105                         failed_size += amdgpu_bo_size(bo);
2106                         ret = amdgpu_amdkfd_bo_validate(bo,
2107                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2108                         if (ret) {
2109                                 pr_debug("Memory eviction: Try again\n");
2110                                 goto validate_map_fail;
2111                         }
2112                 }
2113                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2114                 if (ret) {
2115                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2116                         goto validate_map_fail;
2117                 }
2118                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2119                                     bo_list) {
2120                         ret = update_gpuvm_pte((struct amdgpu_device *)
2121                                               bo_va_entry->kgd_dev,
2122                                               bo_va_entry,
2123                                               &sync_obj);
2124                         if (ret) {
2125                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2126                                 goto validate_map_fail;
2127                         }
2128                 }
2129         }
2130
2131         if (failed_size)
2132                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2133
2134         /* Update page directories */
2135         ret = process_update_pds(process_info, &sync_obj);
2136         if (ret) {
2137                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2138                 goto validate_map_fail;
2139         }
2140
2141         /* Wait for validate and PT updates to finish */
2142         amdgpu_sync_wait(&sync_obj, false);
2143
2144         /* Release the old eviction fence and create a new one. A fence only
2145          * goes from unsignaled to signaled once, so it cannot be reused.
2146          * Use context and mm from the old fence.
2147          */
2148         new_fence = amdgpu_amdkfd_fence_create(
2149                                 process_info->eviction_fence->base.context,
2150                                 process_info->eviction_fence->mm);
2151         if (!new_fence) {
2152                 pr_err("Failed to create eviction fence\n");
2153                 ret = -ENOMEM;
2154                 goto validate_map_fail;
2155         }
2156         dma_fence_put(&process_info->eviction_fence->base);
2157         process_info->eviction_fence = new_fence;
2158         *ef = dma_fence_get(&new_fence->base);
2159
2160         /* Attach new eviction fence to all BOs */
2161         list_for_each_entry(mem, &process_info->kfd_bo_list,
2162                 validate_list.head)
2163                 amdgpu_bo_fence(mem->bo,
2164                         &process_info->eviction_fence->base, true);
2165
2166         /* Attach eviction fence to PD / PT BOs */
2167         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2168                             vm_list_node) {
2169                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2170
2171                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2172         }
2173
2174 validate_map_fail:
2175         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2176         amdgpu_sync_free(&sync_obj);
2177 ttm_reserve_fail:
2178         mutex_unlock(&process_info->lock);
2179         kfree(pd_bo_list);
2180         return ret;
2181 }
2182
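/* Add the device's GWS BO to a KFD process: wrap it in a kgd_mem, validate
 * it in the GWS domain and attach the process eviction fence.
 */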
2183 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2184 {
2185         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2186         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2187         int ret;
2188
2189         if (!info || !gws)
2190                 return -EINVAL;
2191
2192         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2193         if (!*mem)
2194                 return -ENOMEM;
2195
2196         mutex_init(&(*mem)->lock);
2197         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2198         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2199         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2200         (*mem)->process_info = process_info;
2201         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2202         amdgpu_sync_create(&(*mem)->sync);
2203
2204
2205         /* Validate the GWS BO the first time it is added to the process */
2206         mutex_lock(&(*mem)->process_info->lock);
2207         ret = amdgpu_bo_reserve(gws_bo, false);
2208         if (unlikely(ret)) {
2209                 pr_err("Reserve gws bo failed %d\n", ret);
2210                 goto bo_reservation_failure;
2211         }
2212
2213         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2214         if (ret) {
2215                 pr_err("GWS BO validate failed %d\n", ret);
2216                 goto bo_validation_failure;
2217         }
2218         /* The GWS resource is shared between amdgpu and amdkfd.
2219          * Add the process eviction fence to the BO so they can
2220          * evict each other.
2221          */
2222         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2223         if (ret)
2224                 goto reserve_shared_fail;
2225         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2226         amdgpu_bo_unreserve(gws_bo);
2227         mutex_unlock(&(*mem)->process_info->lock);
2228
2229         return ret;
2230
2231 reserve_shared_fail:
2232 bo_validation_failure:
2233         amdgpu_bo_unreserve(gws_bo);
2234 bo_reservation_failure:
2235         mutex_unlock(&(*mem)->process_info->lock);
2236         amdgpu_sync_free(&(*mem)->sync);
2237         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2238         amdgpu_bo_unref(&gws_bo);
2239         mutex_destroy(&(*mem)->lock);
2240         kfree(*mem);
2241         *mem = NULL;
2242         return ret;
2243 }
2244
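/* Undo amdgpu_amdkfd_add_gws_to_process: detach the eviction fence and drop
 * the references taken on the GWS BO.
 */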
2245 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2246 {
2247         int ret;
2248         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2249         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2250         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2251
2252         /* Remove BO from process's validate list so restore worker won't touch
2253          * it anymore
2254          */
2255         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2256
2257         ret = amdgpu_bo_reserve(gws_bo, false);
2258         if (unlikely(ret)) {
2259                 pr_err("Reserve gws bo failed %d\n", ret);
2260                 /* TODO: add BO back to validate_list? */
2261                 return ret;
2262         }
2263         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2264                         process_info->eviction_fence);
2265         amdgpu_bo_unreserve(gws_bo);
2266         amdgpu_sync_free(&kgd_mem->sync);
2267         amdgpu_bo_unref(&gws_bo);
2268         mutex_destroy(&kgd_mem->lock);
2269         kfree(mem);
2270         return 0;
2271 }
2272
2273 /* Returns GPU-specific tiling mode information */
2274 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2275                                 struct tile_config *config)
2276 {
2277         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2278
2279         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2280         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2281         config->num_tile_configs =
2282                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2283         config->macro_tile_config_ptr =
2284                         adev->gfx.config.macrotile_mode_array;
2285         config->num_macro_tile_configs =
2286                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2287
2288         /* Those values are not set from GFX9 onwards */
2289         config->num_banks = adev->gfx.config.num_banks;
2290         config->num_ranks = adev->gfx.config.num_ranks;
2291
2292         return 0;
2293 }