/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_userptr_mem_limit;
	int64_t system_mem_used;
	int64_t userptr_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t        domain;
	bool            wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
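
/* check_if_add_bo_to_vm - Returns true if the BO has not been added to
 * the given VM yet, i.e. there is no bo_va_list entry for this VM.
 */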
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}
/* Set memory usage limits. Currently, the limits are
 *  System (kernel) memory - 3/8th System RAM
 *  Userptr memory - 3/4th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_userptr_mem_limit >> 20));
}
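
/* Worked example of the limits above: with 16 GiB of usable RAM,
 * max_system_mem_limit = (mem >> 1) - (mem >> 3) = 3/8 of RAM = 6 GiB,
 * and max_userptr_mem_limit = mem - (mem >> 2) = 3/4 of RAM = 12 GiB.
 */

/* amdgpu_amdkfd_reserve_system_mem_limit - Charge a new allocation
 * against the limits above. GTT BOs charge both their size and the TTM
 * accounting overhead to the system limit; userptr BOs (allocated in
 * the CPU domain) charge the overhead to the system limit and the size
 * to the userptr limit. Returns -ENOMEM if either limit would be
 * exceeded.
 */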
static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
					      uint64_t size, u32 domain)
{
	size_t acc_size;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		if (kfd_mem_limit.system_mem_used + (acc_size + size) >
			kfd_mem_limit.max_system_mem_limit) {
			ret = -ENOMEM;
			goto err_no_mem;
		}
		kfd_mem_limit.system_mem_used += (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
		if ((kfd_mem_limit.system_mem_used + acc_size >
			kfd_mem_limit.max_system_mem_limit) ||
			(kfd_mem_limit.userptr_mem_used + (size + acc_size) >
			kfd_mem_limit.max_userptr_mem_limit)) {
			ret = -ENOMEM;
			goto err_no_mem;
		}
		kfd_mem_limit.system_mem_used += acc_size;
		kfd_mem_limit.userptr_mem_used += size;
	}
err_no_mem:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
static void unreserve_system_mem_limit(struct amdgpu_device *adev,
				       uint64_t size, u32 domain)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.userptr_mem_used -= size;
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
		  "kfd userptr memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
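
/* amdgpu_amdkfd_unreserve_system_memory_limit - Undo the accounting
 * for a BO that is being released, using the BO's flags and size to
 * reconstruct which limits it was charged against.
 */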
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
		kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
	} else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -=
			(bo->tbo.acc_size + amdgpu_bo_size(bo));
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
		  "kfd userptr memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
 *  is present in the shared list.
 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
 *  from BO's reservation object shared list.
 * @ef_count: [OUT] Number of fences in ef_list.
 *
 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
 * called to restore the eviction fences and to avoid memory leak. This is
 * useful for shared BOs.
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef,
					struct amdgpu_amdkfd_fence ***ef_list,
					unsigned int *ef_count)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	unsigned int i = 0, j = 0, k = 0, shared_count;
	unsigned int count = 0;
	struct amdgpu_amdkfd_fence **fence_list;

	if (!ef && !ef_list)
		return -EINVAL;

	if (ef_list) {
		*ef_list = NULL;
		*ef_count = 0;
	}

	resv = bo->tbo.resv;
	fobj = reservation_object_get_list(resv);

	if (!fobj)
		return 0;

	preempt_disable();
	write_seqcount_begin(&resv->seq);

	/* Go through all the shared fences in the reservation object. If
	 * ef is specified and it exists in the list, remove it and reduce the
	 * count. If ef is not specified, then get the count of eviction fences
	 * present.
	 */
	shared_count = fobj->shared_count;
	for (i = 0; i < shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(fobj->shared[i],
					      reservation_object_held(resv));

		if (ef) {
			if (f->context == ef->base.context) {
				dma_fence_put(f);
				fobj->shared_count--;
			} else {
				RCU_INIT_POINTER(fobj->shared[j++], f);
			}
		} else if (to_amdgpu_amdkfd_fence(f))
			count++;
	}
	write_seqcount_end(&resv->seq);
	preempt_enable();

	if (ef || !count)
		return 0;

	/* Alloc memory for count number of eviction fence pointers. Fill the
	 * ef_list array and ef_count
	 */
	fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
			     GFP_KERNEL);
	if (!fence_list)
		return -ENOMEM;

	preempt_disable();
	write_seqcount_begin(&resv->seq);

	j = 0;
	for (i = 0; i < shared_count; ++i) {
		struct dma_fence *f;
		struct amdgpu_amdkfd_fence *efence;

		f = rcu_dereference_protected(fobj->shared[i],
			reservation_object_held(resv));

		efence = to_amdgpu_amdkfd_fence(f);
		if (efence) {
			fence_list[k++] = efence;
			fobj->shared_count--;
		} else {
			RCU_INIT_POINTER(fobj->shared[j++], f);
		}
	}

	write_seqcount_end(&resv->seq);
	preempt_enable();

	*ef_list = fence_list;
	*ef_count = k;

	return 0;
}
/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
 *  reservation object.
 *
 * @bo: [IN] Add eviction fences to this BO
 * @ef_list: [IN] List of eviction fences to be added
 * @ef_count: [IN] Number of fences in ef_list.
 *
 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
 * function.
 */
static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
				struct amdgpu_amdkfd_fence **ef_list,
				unsigned int ef_count)
{
	int i;

	if (!ef_list || !ef_count)
		return;

	for (i = 0; i < ef_count; i++) {
		amdgpu_bo_fence(bo, &ef_list[i]->base, true);
		/* Re-adding the fence takes an additional reference. Drop that
		 * reference.
		 */
		dma_fence_put(&ef_list[i]->base);
	}

	kfree(ef_list);
}
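
/* A rough sketch of the remove/add pairing used throughout this file
 * (see amdgpu_amdkfd_bo_validate() below): the eviction fence is taken
 * off the reservation object before an operation that waits on all
 * fences, then put back so future evictions are still detected:
 *
 *	amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list, &ef_count);
 *	... operation that must not trigger the eviction fence ...
 *	amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
 */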
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_ttm_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait) {
		struct amdgpu_amdkfd_fence **ef_list;
		unsigned int ef_count;

		ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
							  &ef_count);
		if (ret)
			goto validate_fail;

		ttm_bo_wait(&bo->tbo, false, false);
		amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
	}

validate_fail:
	return ret;
}
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	uint64_t addr, flags = AMDGPU_PTE_VALID;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
	amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
	vm->pd_phys_addr = addr;

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
			 struct dma_fence *f)
{
	int ret = amdgpu_sync_fence(adev, sync, f, false);

	/* Sync objects can't handle multiple GPUs (contexts) updating
	 * sync->last_vm_update. Fortunately we don't need it for
	 * KFD's purposes, so we can just drop that fence.
	 */
	if (sync->last_vm_update) {
		dma_fence_put(sync->last_vm_update);
		sync->last_vm_update = NULL;
	}

	return ret;
}
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_directories(adev, vm);
	if (ret)
		return ret;

	return sync_vm_fence(adev, sync, vm->last_update);
}
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
							 mem->mapping_flags);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate new page tables if needed and validate
	 * them. Clearing of new page tables and validate need to wait
	 * on move fences. We don't want that to trigger the eviction
	 * fence, so remove it temporarily.
	 */
	amdgpu_amdkfd_remove_eviction_fence(pd,
					vm->process_info->eviction_fence,
					NULL, NULL);

	ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
	if (ret) {
		pr_err("Failed to allocate pts, err=%d\n", ret);
		goto err_alloc_pts;
	}

	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	/* Add the eviction fence back */
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

	return 0;

err_alloc_pts:
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}
static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->shared = true;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	/* If no restore worker is running concurrently, user_pages
	 * should not be allocated
	 */
	WARN(mem->user_pages, "Leaking user_pages array");

	mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					   sizeof(struct page *),
					   GFP_KERNEL | __GFP_ZERO);
	if (!mem->user_pages) {
		pr_err("%s: Failed to allocate pages array\n", __func__);
		ret = -ENOMEM;
		goto unregister_out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto free_out;
	}

	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_ttm_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	if (ret)
		release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
free_out:
	kvfree(mem->user_pages);
	mem->user_pages = NULL;
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};
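
/* Typical use of the reservation context, as a rough sketch:
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);	// or reserve_bo_and_cond_vms
 *	if (ret)
 *		return ret;
 *	... operate on the BO and the reserved page table BOs ...
 *	unreserve_bo_and_vms(&ctx, false, false);
 */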
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.robj = bo;
	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.shared = true;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
			      struct amdgpu_vm *vm, enum bo_vm_match map_type,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.robj = bo;
	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.shared = true;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *pd = vm->root.base.bo;

	/* Remove eviction fence from PD (and thereby from PTs too as
	 * they share the resv. object). Otherwise during PT update
	 * job (see amdgpu_vm_bo_update_mapping), eviction fence would
	 * get added to job->sync object and job execution would
	 * trigger the eviction fence.
	 */
	amdgpu_amdkfd_remove_eviction_fence(pd,
					    vm->process_info->eviction_fence,
					    NULL, NULL);
	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	/* Add the eviction fence back */
	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

	sync_vm_fence(adev, sync, bo_va->last_pt_update);

	return 0;
}
static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_vm *vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;

	bo_va = entry->bo_va;
	vm = bo_va->base.vm;
	bo = bo_va->base.bo;

	/* Update the page tables  */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return sync_vm_fence(adev, sync, bo_va->last_pt_update);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}
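
/* init_kfd_vm - Initialize the KFD part of a VM. On the first call for
 * a process this allocates the shared amdkfd_process_info and its
 * eviction fence; it then validates the page directory, attaches the
 * eviction fence to it, and links the VM into the per-process VM list.
 */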
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
	if (ret)
		goto wait_pd_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
					  void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
}
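
/* The exported allocation and mapping functions below are typically
 * driven by KFD ioctls in roughly this order (a sketch, not a fixed
 * contract):
 *
 *	amdgpu_amdkfd_gpuvm_create_process_vm(kgd, &vm, &process_info, &ef);
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
 *						&offset, flags);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *	amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
 */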
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	int byte_align;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	uint32_t mapping_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = *offset;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	/* Workaround for TLB bug on older VI chips */
	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
			adev->asic_type != CHIP_FIJI &&
			adev->asic_type != CHIP_POLARIS10 &&
			adev->asic_type != CHIP_POLARIS11) ?
			VI_BO_SIZE_ALIGN : 1;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
	if (flags & ALLOC_MEM_FLAGS_COHERENT)
		mapping_flags |= AMDGPU_VM_MTYPE_UC;
	else
		mapping_flags |= AMDGPU_VM_MTYPE_NC;
	(*mem)->mapping_flags = mapping_flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_system_mem;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	ret = amdgpu_bo_create(adev, size, byte_align,
				alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret) {
			mutex_lock(&avm->process_info->lock);
			list_del(&(*mem)->validate_list.head);
			mutex_unlock(&avm->process_info->lock);
			goto allocate_init_user_pages_failed;
		}
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_system_mem;
err_bo_create:
	unreserve_system_mem_limit(adev, size, alloc_domain);
err_reserve_system_mem:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	return ret;
}
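
/* amdgpu_amdkfd_gpuvm_free_memory_of_gpu - Release a BO allocated with
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu. Fails with -EBUSY while the
 * BO is still mapped on any GPU.
 */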
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* Free user pages if necessary */
	if (mem->user_pages) {
		pr_debug("%s: Freeing user_pages array\n", __func__);
		if (mem->user_pages[0])
			release_pages(mem->user_pages,
					mem->bo->tbo.ttm->num_pages);
		kvfree(mem->user_pages);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence,
					NULL, NULL);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}
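
/* amdgpu_amdkfd_gpuvm_map_memory_to_gpu - Map a BO into a compute VM.
 * On the first mapping this also adds the BO to the VM (allocating
 * page tables as needed) and validates it. Mappings are reference-
 * counted in mem->mapped_to_gpu_memory.
 */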
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
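
/* amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu - Unmap a BO from a
 * compute VM. When the last mapping is gone, the eviction fence is
 * removed so the BO can be evicted normally.
 */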
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence,
						    NULL, NULL);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}
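
/* amdgpu_amdkfd_gpuvm_sync_memory - Wait for pending VM updates on the
 * BO's sync object. The sync object is cloned so the wait can happen
 * without holding mem->lock.
 */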
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}
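
/* amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel - Pin a GTT BO and map it
 * into the kernel address space, e.g. for queue state the driver needs
 * CPU access to. The BO is removed from the restore BO list so it is
 * never evicted or revalidated.
 */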
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence, NULL, NULL);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int invalid, evicted_bos;
	int r = 0;

	invalid = atomic_inc_return(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd->quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		if (!mem->user_pages) {
			mem->user_pages =
				kvmalloc_array(bo->tbo.ttm->num_pages,
						 sizeof(struct page *),
						 GFP_KERNEL | __GFP_ZERO);
			if (!mem->user_pages) {
				pr_err("%s: Failed to allocate pages array\n",
				       __func__);
				return -ENOMEM;
			}
		} else if (mem->user_pages[0]) {
			release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
		}

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						   mem->user_pages);
		if (ret) {
			mem->user_pages[0] = NULL;
			pr_info("%s: Failed to get user pages: %d\n",
				__func__, ret);
			/* Pretend it succeeded. It will fail later
			 * with a VM fault if the GPU tries to access
			 * it. Better than hanging indefinitely with
			 * stalled user mode queues.
			 */
		}

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.shared = mem->validate_list.shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out;

	amdgpu_sync_create(&sync);

	/* Avoid triggering eviction fences when unmapping invalid
	 * userptr BOs (waits for all fences, doesn't use
	 * FENCE_OWNER_VM)
	 */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
						process_info->eviction_fence,
						NULL, NULL);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Copy pages array and validate the BO if we got user pages */
		if (mem->user_pages[0]) {
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     mem->user_pages);
			amdgpu_ttm_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		/* Validate succeeded, now the BO owns the pages, free
		 * our copy of the pointer array. Put this BO back on
		 * the userptr_valid_list. If we need to revalidate
		 * it, we need to start from scratch.
		 */
		kvfree(mem->user_pages);
		mem->user_pages = NULL;
		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_bo_fence(peer_vm->root.base.bo,
				&process_info->eviction_fence->base, true);
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out:
	kfree(pd_bo_list_entries);

	return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd->resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}
unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
/**
 * amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.shared = mem->validate_list.shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	/* Wait for PD/PTs validate to finish */
	/* FIXME: I think this isn't needed */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		ttm_bo_wait(&bo->tbo, false, false);
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}

		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Wait for validate to finish and attach new eviction fence */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		ttm_bo_wait(&mem->bo->tbo, false, false);
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}