// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include <linux/dma-buf.h>
24 #include <linux/list.h>
25 #include <linux/pagemap.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/task.h>
29 #include "amdgpu_object.h"
30 #include "amdgpu_gem.h"
31 #include "amdgpu_vm.h"
32 #include "amdgpu_hmm.h"
33 #include "amdgpu_amdkfd.h"
34 #include "amdgpu_dma_buf.h"
35 #include <uapi/linux/kfd_ioctl.h>
36 #include "amdgpu_xgmi.h"
37 #include "kfd_smi_events.h"
/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
 * Align VRAM availability to 2 MB to avoid fragmentation caused by
 * 4K allocations in the tail 2 MB.
 */
#define VRAM_AVAILABLITY_ALIGN (1 << 21)
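/*
 * Illustrative arithmetic (not part of the driver logic, just a worked
 * example of the alignment above): a VRAM request of 5 MB + 4 KB is
 * accounted as 6 MB, since ALIGN(0x501000, VRAM_AVAILABLITY_ALIGN) ==
 * 0x600000.
 */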
/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
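/*
 * Worked example of the lookup above (illustrative only):
 * AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so ffs(0x4) - 1 == 2 and
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) evaluates to "VRAM".
 */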
70 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;
	return false;
}
/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
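/*
 * Worked example (illustrative only): on a machine with 64 GB of usable
 * low memory, the limits computed above are
 *	max_system_mem_limit = 64 GB - 4 GB = 60 GB	(15/16 of RAM)
 *	max_ttm_mem_limit    = 32 GB - 8 GB = 24 GB	(3/8 of RAM)
 */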
void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}
/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
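/*
 * Worked example (illustrative only): for 64 GB of managed memory,
 * ESTIMATE_PT_SIZE yields 64 GB >> 14 = 4 MB of page table space, and
 * the result is never smaller than AMDGPU_VM_RESERVED_VRAM.
 */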
/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer.
 *
 * @adev: Device to which the allocated BO belongs
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flag used in allocating a BO as noted above
 *
 * Return: returns -ENOMEM in case of error, ZERO otherwise
 */
134 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
135 uint64_t size, u32 alloc_flag)
137 uint64_t reserved_for_pt =
138 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
139 size_t system_mem_needed, ttm_mem_needed, vram_needed;
142 system_mem_needed = 0;
145 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
146 system_mem_needed = size;
147 ttm_mem_needed = size;
148 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/* Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the
		 * tail 2 MB range.
		 */
155 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
156 system_mem_needed = size;
157 } else if (!(alloc_flag &
158 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
159 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
160 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
164 spin_lock(&kfd_mem_limit.mem_limit_lock);
166 if (kfd_mem_limit.system_mem_used + system_mem_needed >
167 kfd_mem_limit.max_system_mem_limit)
168 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
170 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
171 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
172 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
173 kfd_mem_limit.max_ttm_mem_limit) ||
174 (adev && adev->kfd.vram_used + vram_needed >
175 adev->gmc.real_vram_size - reserved_for_pt)) {
	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
183 WARN_ONCE(vram_needed && !adev,
184 "adev reference can't be null when vram is used");
186 adev->kfd.vram_used += vram_needed;
187 adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
189 kfd_mem_limit.system_mem_used += system_mem_needed;
190 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
193 spin_unlock(&kfd_mem_limit.mem_limit_lock);
197 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
198 uint64_t size, u32 alloc_flag)
200 spin_lock(&kfd_mem_limit.mem_limit_lock);
202 if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
203 kfd_mem_limit.system_mem_used -= size;
204 kfd_mem_limit.ttm_mem_used -= size;
205 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
207 "adev reference can't be null when alloc mem flags vram is set");
209 adev->kfd.vram_used -= size;
210 adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
212 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
213 kfd_mem_limit.system_mem_used -= size;
214 } else if (!(alloc_flag &
215 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
216 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
217 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
220 WARN_ONCE(adev && adev->kfd.vram_used < 0,
221 "KFD VRAM memory accounting unbalanced");
222 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
223 "KFD TTM memory accounting unbalanced");
224 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
225 "KFD system memory accounting unbalanced");
228 spin_unlock(&kfd_mem_limit.mem_limit_lock);
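/*
 * Caller-side pairing sketch (assumption about typical usage, not taken from
 * this file; error handling elided): every successful reservation must be
 * undone with the same size and flags once the BO is released.
 *
 *	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_flag);
 *	if (ret)
 *		return ret;
 *	...
 *	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flag);
 */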
231 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
233 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
234 u32 alloc_flags = bo->kfd_bo->alloc_flags;
235 u64 size = amdgpu_bo_size(bo);
237 amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);
/**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about a USERPTR, DOORBELL or MMIO BO.
 *
 * @adev: Device for which the dmamap BO is being created
 * @mem: BO of peer device that is being DMA mapped. Provides parameters
 * in building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		    struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
254 struct drm_gem_object *gem_obj;
257 ret = amdgpu_bo_reserve(mem->bo, false);
262 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align,
263 AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE,
264 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
266 amdgpu_bo_unreserve(mem->bo);
269 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
273 *bo_out = gem_to_amdgpu_bo(gem_obj);
274 (*bo_out)->parent = amdgpu_bo_ref(mem->bo);
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 * reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with the BO reserved, i.e. with its reservation
 * object locked.
 */
287 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
288 struct amdgpu_amdkfd_fence *ef)
290 struct dma_fence *replacement;
	/* TODO: Instead of blocking here, we should use the fence of the
	 * page table update and TLB flush directly.
	 */
298 replacement = dma_fence_get_stub();
299 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
300 replacement, DMA_RESV_USAGE_BOOKKEEP);
301 dma_fence_put(replacement);
305 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
307 struct amdgpu_bo *root = bo;
308 struct amdgpu_vm_bo_base *vm_bo;
309 struct amdgpu_vm *vm;
310 struct amdkfd_process_info *info;
311 struct amdgpu_amdkfd_fence *ef;
314 /* we can always get vm_bo from root PD bo.*/
326 info = vm->process_info;
327 if (!info || !info->eviction_fence)
330 ef = container_of(dma_fence_get(&info->eviction_fence->base),
331 struct amdgpu_amdkfd_fence, base);
333 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
334 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
335 dma_resv_unlock(bo->tbo.base.resv);
337 dma_fence_put(&ef->base);
341 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
344 struct ttm_operation_ctx ctx = { false, false };
347 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
348 "Called with userptr BO"))
351 amdgpu_bo_placement_from_domain(bo, domain);
353 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
357 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
363 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
365 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
377 struct amdgpu_bo *pd = vm->root.bo;
378 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
381 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
383 pr_err("failed to validate PT BOs\n");
387 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
392 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
394 struct amdgpu_bo *pd = vm->root.bo;
395 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
398 ret = amdgpu_vm_update_pdes(adev, vm, false);
402 return amdgpu_sync_fence(sync, vm->last_update);
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
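/*
 * Illustrative example (not taken from the source): a BO allocated with
 * KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE
 * collects AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 * AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_DEFAULT before the
 * ASIC-specific translation performed by amdgpu_gem_va_map_flags().
 */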
/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by input parameters. The address used to build is assumed
 * to be DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
432 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
434 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
438 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
442 sg_dma_address(sg->sgl) = addr;
443 sg->sgl->length = size;
444 #ifdef CONFIG_NEED_SG_DMA_LENGTH
445 sg->sgl->dma_length = size;
451 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
452 struct kfd_mem_attachment *attachment)
454 enum dma_data_direction direction =
455 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
456 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
457 struct ttm_operation_ctx ctx = {.interruptible = true};
458 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
459 struct amdgpu_device *adev = attachment->adev;
460 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
461 struct ttm_tt *ttm = bo->tbo.ttm;
464 if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
467 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
468 if (unlikely(!ttm->sg))
471 /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
472 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
474 (u64)ttm->num_pages << PAGE_SHIFT,
479 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
483 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
486 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
487 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
494 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
496 pr_err("DMA map userptr failed: %d\n", ret);
497 sg_free_table(ttm->sg);
505 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
507 struct ttm_operation_ctx ctx = {.interruptible = true};
508 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
510 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
511 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns DOORBELL does not require DMA mapping.
 * This is because the request doesn't go through PCIe root complex i.e. it instead
 * loops back. The need to DMA map arises only when accessing a peer device's DOORBELL.
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without regard to
 * device ownership. This is because access requests for MMIO go through the PCIe root
 * complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *     in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *     accessible. This allows an update of requesting device's page table
 *     with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
544 struct ttm_operation_ctx ctx = {.interruptible = true};
545 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
546 struct amdgpu_device *adev = attachment->adev;
547 struct ttm_tt *ttm = bo->tbo.ttm;
548 enum dma_data_direction dir;
	/* Expect SG Table of dmamap BO to be NULL */
554 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
555 if (unlikely(ttm->sg)) {
556 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
560 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
561 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
562 dma_addr = mem->bo->tbo.sg->sgl->dma_address;
563 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
564 pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
565 dma_addr = dma_map_resource(adev->dev, dma_addr,
566 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
567 ret = dma_mapping_error(adev->dev, dma_addr);
570 pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
572 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
573 if (unlikely(!ttm->sg)) {
578 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
579 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
586 sg_free_table(ttm->sg);
590 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
591 dir, DMA_ATTR_SKIP_CPU_SYNC);
596 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
597 struct kfd_mem_attachment *attachment)
599 switch (attachment->type) {
600 case KFD_MEM_ATT_SHARED:
602 case KFD_MEM_ATT_USERPTR:
603 return kfd_mem_dmamap_userptr(mem, attachment);
604 case KFD_MEM_ATT_DMABUF:
605 return kfd_mem_dmamap_dmabuf(attachment);
607 return kfd_mem_dmamap_sg_bo(mem, attachment);
615 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
616 struct kfd_mem_attachment *attachment)
618 enum dma_data_direction direction =
619 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
620 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
621 struct ttm_operation_ctx ctx = {.interruptible = false};
622 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
623 struct amdgpu_device *adev = attachment->adev;
624 struct ttm_tt *ttm = bo->tbo.ttm;
626 if (unlikely(!ttm->sg))
629 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
630 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
632 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
633 sg_free_table(ttm->sg);
639 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
641 struct ttm_operation_ctx ctx = {.interruptible = true};
642 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
644 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
645 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs the following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *     peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *   - Eviction of DOORBELL or MMIO BO on a device having access to its memory
 *
 * Return: void
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
668 struct ttm_operation_ctx ctx = {.interruptible = true};
669 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
670 struct amdgpu_device *adev = attachment->adev;
671 struct ttm_tt *ttm = bo->tbo.ttm;
672 enum dma_data_direction dir;
674 if (unlikely(!ttm->sg)) {
675 pr_err("SG Table of BO is UNEXPECTEDLY NULL");
679 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
680 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
682 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
683 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
684 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
685 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
686 sg_free_table(ttm->sg);
693 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
694 struct kfd_mem_attachment *attachment)
696 switch (attachment->type) {
697 case KFD_MEM_ATT_SHARED:
699 case KFD_MEM_ATT_USERPTR:
700 kfd_mem_dmaunmap_userptr(mem, attachment);
702 case KFD_MEM_ATT_DMABUF:
703 kfd_mem_dmaunmap_dmabuf(attachment);
706 kfd_mem_dmaunmap_sg_bo(mem, attachment);
714 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
715 struct amdgpu_bo **bo)
717 struct drm_gem_object *gobj;
721 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
722 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
724 if (IS_ERR(mem->dmabuf)) {
725 ret = PTR_ERR(mem->dmabuf);
731 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
733 return PTR_ERR(gobj);
735 *bo = gem_to_amdgpu_bo(gobj);
736 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
754 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
755 struct amdgpu_vm *vm, bool is_aql)
757 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
758 unsigned long bo_size = mem->bo->tbo.base.size;
759 uint64_t va = mem->va;
760 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
761 struct amdgpu_bo *bo[2] = {NULL, NULL};
762 bool same_hive = false;
766 pr_err("Invalid VA when adding BO to VM\n");
	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if the peer device has a large BAR. In contrast, access over xGMI
	 * is allowed for both small and large BAR configurations of the
	 * peer device.
	 */
778 if ((adev != bo_adev) &&
779 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
780 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
781 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
782 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
783 same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
784 if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
788 for (i = 0; i <= is_aql; i++) {
789 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
790 if (unlikely(!attachment[i])) {
795 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
798 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
799 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
			/* Mappings on the local GPU, or VRAM mappings in the
			 * local hive, or userptr mappings in IOMMU direct map
			 * mode share the original BO.
			 */
805 attachment[i]->type = KFD_MEM_ATT_SHARED;
807 drm_gem_object_get(&bo[i]->tbo.base);
809 /* Multiple mappings on the same GPU share the BO */
810 attachment[i]->type = KFD_MEM_ATT_SHARED;
812 drm_gem_object_get(&bo[i]->tbo.base);
813 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
814 /* Create an SG BO to DMA-map userptrs on other GPUs */
815 attachment[i]->type = KFD_MEM_ATT_USERPTR;
816 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
819 /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
820 } else if (mem->bo->tbo.type == ttm_bo_type_sg) {
821 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
822 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
824 attachment[i]->type = KFD_MEM_ATT_SG;
825 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
		/* Enable access to GTT and VRAM BOs of peer devices */
829 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
830 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
831 attachment[i]->type = KFD_MEM_ATT_DMABUF;
832 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
835 pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
837 WARN_ONCE(true, "Handling invalid ATTACH request");
842 /* Add BO to VM internal data structures */
843 ret = amdgpu_bo_reserve(bo[i], false);
845 pr_debug("Unable to reserve BO during memory attach");
848 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
849 amdgpu_bo_unreserve(bo[i]);
850 if (unlikely(!attachment[i]->bo_va)) {
852 pr_err("Failed to add BO object to VM. ret == %d\n",
856 attachment[i]->va = va;
857 attachment[i]->pte_flags = get_pte_flags(adev, mem);
858 attachment[i]->adev = adev;
859 list_add(&attachment[i]->list, &mem->attachments);
867 for (; i >= 0; i--) {
870 if (attachment[i]->bo_va) {
871 amdgpu_bo_reserve(bo[i], true);
872 amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
873 amdgpu_bo_unreserve(bo[i]);
874 list_del(&attachment[i]->list);
877 drm_gem_object_put(&bo[i]->tbo.base);
878 kfree(attachment[i]);
883 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
885 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
887 pr_debug("\t remove VA 0x%llx in entry %p\n",
888 attachment->va, attachment);
889 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
890 drm_gem_object_put(&bo->tbo.base);
891 list_del(&attachment->list);
895 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
896 struct amdkfd_process_info *process_info,
899 struct ttm_validate_buffer *entry = &mem->validate_list;
900 struct amdgpu_bo *bo = mem->bo;
902 INIT_LIST_HEAD(&entry->head);
903 entry->num_shared = 1;
904 entry->bo = &bo->tbo;
905 mutex_lock(&process_info->lock);
907 list_add_tail(&entry->head, &process_info->userptr_valid_list);
909 list_add_tail(&entry->head, &process_info->kfd_bo_list);
910 mutex_unlock(&process_info->lock);
913 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
914 struct amdkfd_process_info *process_info)
916 struct ttm_validate_buffer *bo_list_entry;
918 bo_list_entry = &mem->validate_list;
919 mutex_lock(&process_info->lock);
920 list_del(&bo_list_entry->head);
921 mutex_unlock(&process_info->lock);
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
936 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
939 struct amdkfd_process_info *process_info = mem->process_info;
940 struct amdgpu_bo *bo = mem->bo;
941 struct ttm_operation_ctx ctx = { true, false };
942 struct hmm_range *range;
945 mutex_lock(&process_info->lock);
947 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
949 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
953 ret = amdgpu_hmm_register(bo, user_addr);
955 pr_err("%s: Failed to register MMU notifier: %d\n",
		/*
		 * During a CRIU restore operation, the userptr buffer objects
		 * will be validated in the restore_userptr_work worker at a
		 * later stage when it is scheduled by another ioctl called by
		 * the CRIU master process for the target pid for restore.
		 */
967 mutex_lock(&process_info->notifier_lock);
969 mutex_unlock(&process_info->notifier_lock);
970 mutex_unlock(&process_info->lock);
974 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
976 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
980 ret = amdgpu_bo_reserve(bo, true);
982 pr_err("%s: Failed to reserve BO\n", __func__);
985 amdgpu_bo_placement_from_domain(bo, mem->domain);
986 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
988 pr_err("%s: failed to validate BO\n", __func__);
989 amdgpu_bo_unreserve(bo);
992 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
995 amdgpu_hmm_unregister(bo);
997 mutex_unlock(&process_info->lock);
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
1006 struct bo_vm_reservation_context {
1007 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
1008 unsigned int n_vms; /* Number of VMs reserved */
1009 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
1010 struct ww_acquire_ctx ticket; /* Reservation ticket */
1011 struct list_head list, duplicates; /* BO lists */
1012 struct amdgpu_sync *sync; /* Pointer to sync object */
	bool reserved;			    /* Whether BOs are reserved */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};
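/*
 * Typical usage sketch for the reservation context (illustrative only,
 * assuming a caller that already holds a kgd_mem and its VM; error handling
 * elided):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (ret)
 *		return ret;
 *	... update mappings, add fences to ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, true, false);
 */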
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
1028 static int reserve_bo_and_vm(struct kgd_mem *mem,
1029 struct amdgpu_vm *vm,
1030 struct bo_vm_reservation_context *ctx)
1032 struct amdgpu_bo *bo = mem->bo;
1037 ctx->reserved = false;
1039 ctx->sync = &mem->sync;
1041 INIT_LIST_HEAD(&ctx->list);
1042 INIT_LIST_HEAD(&ctx->duplicates);
1044 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
1048 ctx->kfd_bo.priority = 0;
1049 ctx->kfd_bo.tv.bo = &bo->tbo;
1050 ctx->kfd_bo.tv.num_shared = 1;
1051 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1053 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
1055 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1056 false, &ctx->duplicates);
1058 pr_err("Failed to reserve buffers in ttm.\n");
1064 ctx->reserved = true;
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, only the single given VM is used.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
1078 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
1079 struct amdgpu_vm *vm, enum bo_vm_match map_type,
1080 struct bo_vm_reservation_context *ctx)
1082 struct amdgpu_bo *bo = mem->bo;
1083 struct kfd_mem_attachment *entry;
1087 ctx->reserved = false;
1090 ctx->sync = &mem->sync;
1092 INIT_LIST_HEAD(&ctx->list);
1093 INIT_LIST_HEAD(&ctx->duplicates);
1095 list_for_each_entry(entry, &mem->attachments, list) {
1096 if ((vm && vm != entry->bo_va->base.vm) ||
1097 (entry->is_mapped != map_type
1098 && map_type != BO_VM_ALL))
1104 if (ctx->n_vms != 0) {
1105 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
1111 ctx->kfd_bo.priority = 0;
1112 ctx->kfd_bo.tv.bo = &bo->tbo;
1113 ctx->kfd_bo.tv.num_shared = 1;
1114 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1117 list_for_each_entry(entry, &mem->attachments, list) {
1118 if ((vm && vm != entry->bo_va->base.vm) ||
1119 (entry->is_mapped != map_type
1120 && map_type != BO_VM_ALL))
1123 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1128 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1129 false, &ctx->duplicates);
1131 pr_err("Failed to reserve buffers in ttm.\n");
1137 ctx->reserved = true;
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
1152 bool wait, bool intr)
1157 ret = amdgpu_sync_wait(ctx->sync, intr);
1160 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1165 ctx->reserved = false;
1171 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1172 struct kfd_mem_attachment *entry,
1173 struct amdgpu_sync *sync)
1175 struct amdgpu_bo_va *bo_va = entry->bo_va;
1176 struct amdgpu_device *adev = entry->adev;
1177 struct amdgpu_vm *vm = bo_va->base.vm;
1179 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1181 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1183 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1185 kfd_mem_dmaunmap_attachment(mem, entry);
1188 static int update_gpuvm_pte(struct kgd_mem *mem,
1189 struct kfd_mem_attachment *entry,
1190 struct amdgpu_sync *sync)
1192 struct amdgpu_bo_va *bo_va = entry->bo_va;
1193 struct amdgpu_device *adev = entry->adev;
1196 ret = kfd_mem_dmamap_attachment(mem, entry);
1200 /* Update the page tables */
1201 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1203 pr_err("amdgpu_vm_bo_update failed\n");
1207 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1210 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1211 struct kfd_mem_attachment *entry,
1212 struct amdgpu_sync *sync,
1217 /* Set virtual address for the allocation */
1218 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1219 amdgpu_bo_size(entry->bo_va->base.bo),
1222 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1230 ret = update_gpuvm_pte(mem, entry, sync);
1232 pr_err("update_gpuvm_pte() failed\n");
1233 goto update_gpuvm_pte_failed;
1238 update_gpuvm_pte_failed:
1239 unmap_bo_from_gpuvm(mem, entry, sync);
1243 static int process_validate_vms(struct amdkfd_process_info *process_info)
1245 struct amdgpu_vm *peer_vm;
1248 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1250 ret = vm_validate_pt_pd_bos(peer_vm);
1258 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1259 struct amdgpu_sync *sync)
1261 struct amdgpu_vm *peer_vm;
1264 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1266 struct amdgpu_bo *pd = peer_vm->root.bo;
1268 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1269 AMDGPU_SYNC_NE_OWNER,
1270 AMDGPU_FENCE_OWNER_KFD);
1278 static int process_update_pds(struct amdkfd_process_info *process_info,
1279 struct amdgpu_sync *sync)
1281 struct amdgpu_vm *peer_vm;
1284 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1286 ret = vm_update_pds(peer_vm, sync);
1294 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1295 struct dma_fence **ef)
1297 struct amdkfd_process_info *info = NULL;
1300 if (!*process_info) {
1301 info = kzalloc(sizeof(*info), GFP_KERNEL);
1305 mutex_init(&info->lock);
1306 mutex_init(&info->notifier_lock);
1307 INIT_LIST_HEAD(&info->vm_list_head);
1308 INIT_LIST_HEAD(&info->kfd_bo_list);
1309 INIT_LIST_HEAD(&info->userptr_valid_list);
1310 INIT_LIST_HEAD(&info->userptr_inval_list);
1312 info->eviction_fence =
1313 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1316 if (!info->eviction_fence) {
1317 pr_err("Failed to create eviction fence\n");
1319 goto create_evict_fence_fail;
1322 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1323 INIT_DELAYED_WORK(&info->restore_userptr_work,
1324 amdgpu_amdkfd_restore_userptr_worker);
1326 *process_info = info;
1327 *ef = dma_fence_get(&info->eviction_fence->base);
1330 vm->process_info = *process_info;
1332 /* Validate page directory and attach eviction fence */
1333 ret = amdgpu_bo_reserve(vm->root.bo, true);
1335 goto reserve_pd_fail;
1336 ret = vm_validate_pt_pd_bos(vm);
1338 pr_err("validate_pt_pd_bos() failed\n");
1339 goto validate_pd_fail;
1341 ret = amdgpu_bo_sync_wait(vm->root.bo,
1342 AMDGPU_FENCE_OWNER_KFD, false);
1345 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1347 goto reserve_shared_fail;
1348 dma_resv_add_fence(vm->root.bo->tbo.base.resv,
1349 &vm->process_info->eviction_fence->base,
1350 DMA_RESV_USAGE_BOOKKEEP);
1351 amdgpu_bo_unreserve(vm->root.bo);
1353 /* Update process info */
1354 mutex_lock(&vm->process_info->lock);
1355 list_add_tail(&vm->vm_list_node,
1356 &(vm->process_info->vm_list_head));
1357 vm->process_info->n_vms++;
1358 mutex_unlock(&vm->process_info->lock);
1362 reserve_shared_fail:
1365 amdgpu_bo_unreserve(vm->root.bo);
1367 vm->process_info = NULL;
1369 /* Two fence references: one in info and one in *ef */
1370 dma_fence_put(&info->eviction_fence->base);
1373 *process_info = NULL;
1375 create_evict_fence_fail:
1376 mutex_destroy(&info->lock);
1377 mutex_destroy(&info->notifier_lock);
/**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
 * @bo: Handle of buffer object being pinned
 * @domain: Domain into which BO should be pinned
 *
 *   - USERPTR BOs are UNPINNABLE and will return error
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count incremented. It is valid to PIN a BO multiple times
 *
 * Return: ZERO if successful in pinning, Non-Zero in case of error.
 */
1394 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1398 ret = amdgpu_bo_reserve(bo, false);
1402 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1404 pr_err("Error in Pinning BO to domain: %d\n", domain);
1406 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1407 amdgpu_bo_unreserve(bo);
/**
 * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using the following criteria
 * @bo: Handle of buffer object being unpinned
 *
 *   - It is an illegal request for USERPTR BOs and is ignored
 *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
 *     PIN count decremented. Calls to UNPIN must balance calls to PIN
 */
1420 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1424 ret = amdgpu_bo_reserve(bo, false);
1428 amdgpu_bo_unpin(bo);
1429 amdgpu_bo_unreserve(bo);
1432 int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
1433 struct file *filp, u32 pasid)
1436 struct amdgpu_fpriv *drv_priv;
1437 struct amdgpu_vm *avm;
1440 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1443 avm = &drv_priv->vm;
	/* Free the original amdgpu allocated pasid;
	 * it will be replaced with a kfd allocated pasid.
	 */
	if (avm->pasid) {
		amdgpu_pasid_free(avm->pasid);
		amdgpu_vm_set_pasid(adev, avm, 0);
	}
1453 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1460 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1462 void **process_info,
1463 struct dma_fence **ef)
1465 struct amdgpu_fpriv *drv_priv;
1466 struct amdgpu_vm *avm;
1469 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1472 avm = &drv_priv->vm;
1474 /* Already a compute VM? */
1475 if (avm->process_info)
1478 /* Convert VM into a compute VM */
1479 ret = amdgpu_vm_make_compute(adev, avm);
1483 /* Initialize KFD part of the VM and process info */
1484 ret = init_kfd_vm(avm, process_info, ef);
1488 amdgpu_vm_set_task_info(avm);
1493 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1494 struct amdgpu_vm *vm)
1496 struct amdkfd_process_info *process_info = vm->process_info;
1501 /* Update process info */
1502 mutex_lock(&process_info->lock);
1503 process_info->n_vms--;
1504 list_del(&vm->vm_list_node);
1505 mutex_unlock(&process_info->lock);
1507 vm->process_info = NULL;
1509 /* Release per-process resources when last compute VM is destroyed */
1510 if (!process_info->n_vms) {
1511 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1512 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1513 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1515 dma_fence_put(&process_info->eviction_fence->base);
1516 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1517 put_pid(process_info->pid);
1518 mutex_destroy(&process_info->lock);
1519 mutex_destroy(&process_info->notifier_lock);
1520 kfree(process_info);
1524 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1527 struct amdgpu_vm *avm;
1529 if (WARN_ON(!adev || !drm_priv))
1532 avm = drm_priv_to_vm(drm_priv);
1534 pr_debug("Releasing process vm %p\n", avm);
	/* The original pasid of the amdgpu vm has already been
	 * released when the amdgpu vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set the amdgpu pasid
	 * to 0 to avoid a duplicate release.
	 */
1542 amdgpu_vm_release_compute(adev, avm);
1545 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1547 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1548 struct amdgpu_bo *pd = avm->root.bo;
1549 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1551 if (adev->asic_type < CHIP_VEGA10)
1552 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1553 return avm->pd_phys_addr;
1556 void amdgpu_amdkfd_block_mmu_notifications(void *p)
1558 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1560 mutex_lock(&pinfo->lock);
1561 WRITE_ONCE(pinfo->block_mmu_notifications, true);
1562 mutex_unlock(&pinfo->lock);
1565 int amdgpu_amdkfd_criu_resume(void *p)
1568 struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1570 mutex_lock(&pinfo->lock);
1571 pr_debug("scheduling work\n");
1572 mutex_lock(&pinfo->notifier_lock);
1573 pinfo->evicted_bos++;
1574 mutex_unlock(&pinfo->notifier_lock);
1575 if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1579 WRITE_ONCE(pinfo->block_mmu_notifications, false);
1580 schedule_delayed_work(&pinfo->restore_userptr_work, 0);
1583 mutex_unlock(&pinfo->lock);
1587 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
1589 uint64_t reserved_for_pt =
1590 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
1593 spin_lock(&kfd_mem_limit.mem_limit_lock);
1594 available = adev->gmc.real_vram_size
1595 - adev->kfd.vram_used_aligned
1596 - atomic64_read(&adev->vram_pin_size)
1598 spin_unlock(&kfd_mem_limit.mem_limit_lock);
1600 return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
1603 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1604 struct amdgpu_device *adev, uint64_t va, uint64_t size,
1605 void *drm_priv, struct kgd_mem **mem,
1606 uint64_t *offset, uint32_t flags, bool criu_resume)
1608 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1609 enum ttm_bo_type bo_type = ttm_bo_type_device;
1610 struct sg_table *sg = NULL;
1611 uint64_t user_addr = 0;
1612 struct amdgpu_bo *bo;
1613 struct drm_gem_object *gobj = NULL;
1614 u32 domain, alloc_domain;
	/*
	 * Check on which domain to allocate BO
	 */
1621 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1622 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1623 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1624 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1625 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1626 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1627 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1630 domain = AMDGPU_GEM_DOMAIN_GTT;
1631 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1632 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1634 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1635 if (!offset || !*offset)
1637 user_addr = untagged_addr(*offset);
1638 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1639 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1640 bo_type = ttm_bo_type_sg;
1641 if (size > UINT_MAX)
1643 sg = create_sg_table(*offset, size);
1651 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
1652 alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
1653 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
1654 alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
1656 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1661 INIT_LIST_HEAD(&(*mem)->attachments);
1662 mutex_init(&(*mem)->lock);
1663 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the size.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;
1672 (*mem)->alloc_flags = flags;
1674 amdgpu_sync_create(&(*mem)->sync);
1676 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
1678 pr_debug("Insufficient memory\n");
1679 goto err_reserve_limit;
1682 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1683 va, size, domain_string(alloc_domain));
1685 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1686 bo_type, NULL, &gobj);
1688 pr_debug("Failed to create BO on domain %s. ret %d\n",
1689 domain_string(alloc_domain), ret);
1692 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1694 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1695 goto err_node_allow;
1697 bo = gem_to_amdgpu_bo(gobj);
1698 if (bo_type == ttm_bo_type_sg) {
1700 bo->tbo.ttm->sg = sg;
1705 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1708 (*mem)->domain = domain;
1709 (*mem)->mapped_to_gpu_memory = 0;
1710 (*mem)->process_info = avm->process_info;
1711 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1714 pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
1715 ret = init_user_pages(*mem, user_addr, criu_resume);
1717 goto allocate_init_user_pages_failed;
1718 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1719 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1720 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1722 pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1725 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1726 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1730 *offset = amdgpu_bo_mmap_offset(bo);
1734 allocate_init_user_pages_failed:
1736 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1737 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1739 /* Don't unreserve system mem limit twice */
1740 goto err_reserve_limit;
1742 amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
1744 mutex_destroy(&(*mem)->lock);
1746 drm_gem_object_put(gobj);
1757 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1758 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1761 struct amdkfd_process_info *process_info = mem->process_info;
1762 unsigned long bo_size = mem->bo->tbo.base.size;
1763 bool use_release_notifier = (mem->bo->kfd_bo == mem);
1764 struct kfd_mem_attachment *entry, *tmp;
1765 struct bo_vm_reservation_context ctx;
1766 struct ttm_validate_buffer *bo_list_entry;
1767 unsigned int mapped_to_gpu_memory;
1769 bool is_imported = false;
1771 mutex_lock(&mem->lock);
1773 /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
1774 if (mem->alloc_flags &
1775 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1776 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1777 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1780 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1781 is_imported = mem->is_imported;
1782 mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */
1787 if (mapped_to_gpu_memory > 0) {
1788 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1793 /* Make sure restore workers don't access the BO any more */
1794 bo_list_entry = &mem->validate_list;
1795 mutex_lock(&process_info->lock);
1796 list_del(&bo_list_entry->head);
1797 mutex_unlock(&process_info->lock);
1799 /* Cleanup user pages and MMU notifiers */
1800 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1801 amdgpu_hmm_unregister(mem->bo);
1802 mutex_lock(&process_info->notifier_lock);
1803 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1804 mutex_unlock(&process_info->notifier_lock);
1807 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached.
	 */
1815 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1816 process_info->eviction_fence);
1817 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1818 mem->va + bo_size * (1 + mem->aql_queue));
1820 /* Remove from VM internal data structures */
1821 list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1822 kfd_mem_detach(entry);
1824 ret = unreserve_bo_and_vms(&ctx, false, false);
1826 /* Free the sync object */
1827 amdgpu_sync_free(&mem->sync);
	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
1832 if (mem->bo->tbo.sg) {
1833 sg_free_table(mem->bo->tbo.sg);
1834 kfree(mem->bo->tbo.sg);
	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
1841 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1849 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1851 dma_buf_put(mem->dmabuf);
1852 mutex_destroy(&mem->lock);
	/* If this releases the last reference, it will end up calling
	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
	 * this needs to be the last call here.
	 */
1858 drm_gem_object_put(&mem->bo->tbo.base);
	/*
	 * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
	 * explicitly free it here.
	 */
1864 if (!use_release_notifier)
1870 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1871 struct amdgpu_device *adev, struct kgd_mem *mem,
1874 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1876 struct amdgpu_bo *bo;
1878 struct kfd_mem_attachment *entry;
1879 struct bo_vm_reservation_context ctx;
1880 unsigned long bo_size;
1881 bool is_invalid_userptr = false;
1885 pr_err("Invalid BO when mapping memory to GPU\n");
	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
1893 mutex_lock(&mem->process_info->lock);
	/* Lock notifier lock. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
1899 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1900 mutex_lock(&mem->process_info->notifier_lock);
1901 is_invalid_userptr = !!mem->invalid;
1902 mutex_unlock(&mem->process_info->notifier_lock);
1905 mutex_lock(&mem->lock);
1907 domain = mem->domain;
1908 bo_size = bo->tbo.base.size;
1910 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1912 mem->va + bo_size * (1 + mem->aql_queue),
1913 avm, domain_string(domain));
1915 if (!kfd_mem_is_attached(avm, mem)) {
1916 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1921 ret = reserve_bo_and_vm(mem, avm, &ctx);
	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
1930 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1931 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1932 is_invalid_userptr = true;
1934 ret = vm_validate_pt_pd_bos(avm);
1938 if (mem->mapped_to_gpu_memory == 0 &&
1939 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
1944 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1946 pr_debug("Validate failed\n");
1951 list_for_each_entry(entry, &mem->attachments, list) {
1952 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1955 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1956 entry->va, entry->va + bo_size, entry);
1958 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1959 is_invalid_userptr);
1961 pr_err("Failed to map bo to gpuvm\n");
1965 ret = vm_update_pds(avm, ctx.sync);
1967 pr_err("Failed to update page directories\n");
1971 entry->is_mapped = true;
1972 mem->mapped_to_gpu_memory++;
1973 pr_debug("\t INC mapping count %d\n",
1974 mem->mapped_to_gpu_memory);
1977 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1978 dma_resv_add_fence(bo->tbo.base.resv,
1979 &avm->process_info->eviction_fence->base,
1980 DMA_RESV_USAGE_BOOKKEEP);
1981 ret = unreserve_bo_and_vms(&ctx, false, false);
1986 unreserve_bo_and_vms(&ctx, false, false);
1988 mutex_unlock(&mem->process_info->lock);
1989 mutex_unlock(&mem->lock);
1993 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1994 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
1996 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1997 struct amdkfd_process_info *process_info = avm->process_info;
1998 unsigned long bo_size = mem->bo->tbo.base.size;
1999 struct kfd_mem_attachment *entry;
2000 struct bo_vm_reservation_context ctx;
2003 mutex_lock(&mem->lock);
2005 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2008 /* If no VMs were reserved, it means the BO wasn't actually mapped */
2009 if (ctx.n_vms == 0) {
2014 ret = vm_validate_pt_pd_bos(avm);
2018 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2020 mem->va + bo_size * (1 + mem->aql_queue),
2023 list_for_each_entry(entry, &mem->attachments, list) {
2024 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2027 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2028 entry->va, entry->va + bo_size, entry);
2030 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2031 entry->is_mapped = false;
2033 mem->mapped_to_gpu_memory--;
2034 pr_debug("\t DEC mapping count %d\n",
2035 mem->mapped_to_gpu_memory);
	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
2041 if (mem->mapped_to_gpu_memory == 0 &&
2042 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
2043 !mem->bo->tbo.pin_count)
2044 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
2045 process_info->eviction_fence);
2048 unreserve_bo_and_vms(&ctx, false, false);
2050 mutex_unlock(&mem->lock);
2054 int amdgpu_amdkfd_gpuvm_sync_memory(
2055 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2057 struct amdgpu_sync sync;
2060 amdgpu_sync_create(&sync);
2062 mutex_lock(&mem->lock);
2063 amdgpu_sync_clone(&mem->sync, &sync);
2064 mutex_unlock(&mem->lock);
2066 ret = amdgpu_sync_wait(&sync, intr);
2067 amdgpu_sync_free(&sync);
/**
 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
 * @adev: Device to which allocated BO belongs
 * @bo: Buffer object to be mapped
 *
 * Before return, bo reference count is incremented. To release the reference and unpin/
 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
 */
2079 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
2083 ret = amdgpu_bo_reserve(bo, true);
2085 pr_err("Failed to reserve bo. ret %d\n", ret);
2086 goto err_reserve_bo_failed;
2089 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2091 pr_err("Failed to pin bo. ret %d\n", ret);
2092 goto err_pin_bo_failed;
2095 ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2097 pr_err("Failed to bind bo to GART. ret %d\n", ret);
2098 goto err_map_bo_gart_failed;
2101 amdgpu_amdkfd_remove_eviction_fence(
2102 bo, bo->kfd_bo->process_info->eviction_fence);
2104 amdgpu_bo_unreserve(bo);
2106 bo = amdgpu_bo_ref(bo);
2110 err_map_bo_gart_failed:
2111 amdgpu_bo_unpin(bo);
2113 amdgpu_bo_unreserve(bo);
2114 err_reserve_bo_failed:
/** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be mapped for CPU access
 * @kptr[out]: pointer in kernel CPU address space
 * @size[out]: size of the buffer
 *
 * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
 * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
 * validate_list, so the GPU mapping can be restored after a page table was
 * evicted.
 *
 * Return: 0 on success, error code on failure
 */
2132 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
2133 void **kptr, uint64_t *size)
2136 struct amdgpu_bo *bo = mem->bo;
2138 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2139 pr_err("userptr can't be mapped to kernel\n");
2143 mutex_lock(&mem->process_info->lock);
2145 ret = amdgpu_bo_reserve(bo, true);
2147 pr_err("Failed to reserve bo. ret %d\n", ret);
2148 goto bo_reserve_failed;
2151 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2153 pr_err("Failed to pin bo. ret %d\n", ret);
2157 ret = amdgpu_bo_kmap(bo, kptr);
2159 pr_err("Failed to map bo to kernel. ret %d\n", ret);
2163 amdgpu_amdkfd_remove_eviction_fence(
2164 bo, mem->process_info->eviction_fence);
2167 *size = amdgpu_bo_size(bo);
2169 amdgpu_bo_unreserve(bo);
2171 mutex_unlock(&mem->process_info->lock);
2175 amdgpu_bo_unpin(bo);
2177 amdgpu_bo_unreserve(bo);
2179 mutex_unlock(&mem->process_info->lock);
/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
 *
 * @mem: Buffer object to be unmapped for CPU access
 *
 * Removes the kernel CPU mapping and unpins the BO. It does not restore the
 * eviction fence, so this function should only be used for cleanup before the
 * BO is freed.
 */
2192 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
2194 struct amdgpu_bo *bo = mem->bo;
2196 amdgpu_bo_reserve(bo, true);
2197 amdgpu_bo_kunmap(bo);
2198 amdgpu_bo_unpin(bo);
2199 amdgpu_bo_unreserve(bo);
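/*
 * Caller-side pairing sketch (illustrative only; assumes a valid kgd_mem and
 * elides error handling):
 *
 *	void *kptr;
 *	uint64_t size;
 *
 *	if (!amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size))
 *		memset(kptr, 0, size);	// CPU access while the BO stays pinned
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 */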
2202 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2203 struct kfd_vm_fault_info *mem)
2205 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2206 *mem = *adev->gmc.vm_fault_info;
2207 mb(); /* make sure read happened */
2208 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2213 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
2214 struct dma_buf *dma_buf,
2215 uint64_t va, void *drm_priv,
2216 struct kgd_mem **mem, uint64_t *size,
2217 uint64_t *mmap_offset)
2219 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2220 struct drm_gem_object *obj;
2221 struct amdgpu_bo *bo;
2224 if (dma_buf->ops != &amdgpu_dmabuf_ops)
2225 /* Can't handle non-graphics buffers */
2228 obj = dma_buf->priv;
2229 if (drm_to_adev(obj->dev) != adev)
2230 /* Can't handle buffers from other devices */
2233 bo = gem_to_amdgpu_bo(obj);
2234 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2235 AMDGPU_GEM_DOMAIN_GTT)))
2236 /* Only VRAM and GTT BOs are supported */
2239 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2243 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2250 *size = amdgpu_bo_size(bo);
2253 *mmap_offset = amdgpu_bo_mmap_offset(bo);
2255 INIT_LIST_HEAD(&(*mem)->attachments);
2256 mutex_init(&(*mem)->lock);
2258 (*mem)->alloc_flags =
2259 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2260 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2261 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2262 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2264 drm_gem_object_get(&bo->tbo.base);
2267 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2268 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2269 (*mem)->mapped_to_gpu_memory = 0;
2270 (*mem)->process_info = avm->process_info;
2271 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2272 amdgpu_sync_create(&(*mem)->sync);
2273 (*mem)->is_imported = true;
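/*
 * Illustrative only: a hedged sketch of importing a dma-buf file descriptor
 * into a KFD VM using the function above. The fd, virtual address and
 * drm_priv handle come from the caller's context; example_import_dmabuf_fd()
 * is a hypothetical wrapper.
 */
static int example_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
				    uint64_t va, void *drm_priv)
{
	uint64_t size, mmap_offset;
	struct kgd_mem *mem;
	struct dma_buf *dmabuf;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ret = amdgpu_amdkfd_gpuvm_import_dmabuf(adev, dmabuf, va, drm_priv,
						&mem, &size, &mmap_offset);

	/* The import takes its own reference on the underlying GEM object,
	 * so the dma-buf reference from dma_buf_get() can be dropped here.
	 */
	dma_buf_put(dmabuf);
	return ret;
}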
2278 /* Evict a userptr BO by stopping the queues if necessary
2280 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2281 * cannot do any memory allocations, and cannot take any locks that
2282 * are held elsewhere while allocating memory.
2284 * It doesn't do anything to the BO itself. The real work happens in
2285 * restore, where we get updated page addresses. This function only
2286 * ensures that GPU access to the BO is stopped.
2288 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
2289 unsigned long cur_seq, struct kgd_mem *mem)
2291 struct amdkfd_process_info *process_info = mem->process_info;
2294 /* Do not process MMU notifications during CRIU restore until
2295 * KFD_CRIU_OP_RESUME IOCTL is received
2297 if (READ_ONCE(process_info->block_mmu_notifications))
2300 mutex_lock(&process_info->notifier_lock);
2301 mmu_interval_set_seq(mni, cur_seq);
2304 if (++process_info->evicted_bos == 1) {
2305 /* First eviction, stop the queues */
2306 r = kgd2kfd_quiesce_mm(mni->mm,
2307 KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
2309 pr_err("Failed to quiesce KFD\n");
2310 schedule_delayed_work(&process_info->restore_userptr_work,
2311 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2313 mutex_unlock(&process_info->notifier_lock);
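/*
 * Illustrative only: a hedged sketch of how an MMU interval notifier
 * invalidate callback (implemented in amdgpu_hmm.c in the real driver) could
 * forward an invalidation to amdgpu_amdkfd_evict_userptr(). The "notifier"
 * field name in struct amdgpu_bo and the helper name are assumptions for
 * illustration.
 */
static bool example_userptr_invalidate_hsa(struct mmu_interval_notifier *mni,
					   const struct mmu_notifier_range *range,
					   unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);

	/* Eviction may have to stop the queues, which can block. */
	if (!mmu_notifier_range_blockable(range))
		return false;

	amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);

	return true;
}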
2318 /* Update invalid userptr BOs
2320 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2321 * userptr_inval_list and updates user pages for all BOs that have
2322 * been invalidated since their last update.
2324 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2325 struct mm_struct *mm)
2327 struct kgd_mem *mem, *tmp_mem;
2328 struct amdgpu_bo *bo;
2329 struct ttm_operation_ctx ctx = { false, false };
2333 mutex_lock(&process_info->notifier_lock);
2335 /* Move all invalidated BOs to the userptr_inval_list */
2336 list_for_each_entry_safe(mem, tmp_mem,
2337 &process_info->userptr_valid_list,
2340 list_move_tail(&mem->validate_list.head,
2341 &process_info->userptr_inval_list);
2343 /* Go through userptr_inval_list and update any invalid user_pages */
2344 list_for_each_entry(mem, &process_info->userptr_inval_list,
2345 validate_list.head) {
2346 invalid = mem->invalid;
2348 /* BO hasn't been invalidated since the last
2349 * revalidation attempt. Keep its page list.
2355 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
2358 /* BO reservations and getting user pages (hmm_range_fault)
2359 * must happen outside the notifier lock
2361 mutex_unlock(&process_info->notifier_lock);
2363 /* Move the BO to system (CPU) domain if necessary to unmap
2364 * and free the SG table
2366 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
2367 if (amdgpu_bo_reserve(bo, true))
2369 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2370 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2371 amdgpu_bo_unreserve(bo);
2373 pr_err("%s: Failed to invalidate userptr BO\n",
2379 /* Get updated user pages */
2380 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2383 pr_debug("Failed %d to get user pages\n", ret);
2385 /* Return the -EFAULT (bad address) error as success. The access will
2386 * fail later with a VM fault if the GPU tries to use the
2387 * mapping. Better than hanging indefinitely with stalled user mode queues.
2390 * Return other errors (-EBUSY or -ENOMEM) to retry the restore
2398 mutex_lock(&process_info->notifier_lock);
2400 /* Mark the BO as valid unless it was invalidated
2401 * again concurrently.
2403 if (mem->invalid != invalid) {
2411 mutex_unlock(&process_info->notifier_lock);
2416 /* Validate invalid userptr BOs
2418 * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
2419 * with new page addresses and waits for the page table updates to complete.
2421 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2423 struct amdgpu_bo_list_entry *pd_bo_list_entries;
2424 struct list_head resv_list, duplicates;
2425 struct ww_acquire_ctx ticket;
2426 struct amdgpu_sync sync;
2428 struct amdgpu_vm *peer_vm;
2429 struct kgd_mem *mem, *tmp_mem;
2430 struct amdgpu_bo *bo;
2431 struct ttm_operation_ctx ctx = { false, false };
2434 pd_bo_list_entries = kcalloc(process_info->n_vms,
2435 sizeof(struct amdgpu_bo_list_entry),
2437 if (!pd_bo_list_entries) {
2438 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2443 INIT_LIST_HEAD(&resv_list);
2444 INIT_LIST_HEAD(&duplicates);
2446 /* Get all the page directory BOs that need to be reserved */
2448 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2450 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2451 &pd_bo_list_entries[i++]);
2452 /* Add the userptr_inval_list entries to resv_list */
2453 list_for_each_entry(mem, &process_info->userptr_inval_list,
2454 validate_list.head) {
2455 list_add_tail(&mem->resv_list.head, &resv_list);
2456 mem->resv_list.bo = mem->validate_list.bo;
2457 mem->resv_list.num_shared = mem->validate_list.num_shared;
2460 /* Reserve all BOs and page tables for validation */
2461 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2462 WARN(!list_empty(&duplicates), "Duplicates should be empty");
2466 amdgpu_sync_create(&sync);
2468 ret = process_validate_vms(process_info);
2472 /* Validate BOs and update GPUVM page tables */
2473 list_for_each_entry_safe(mem, tmp_mem,
2474 &process_info->userptr_inval_list,
2475 validate_list.head) {
2476 struct kfd_mem_attachment *attachment;
2480 /* Validate the BO if we got user pages */
2481 if (bo->tbo.ttm->pages[0]) {
2482 amdgpu_bo_placement_from_domain(bo, mem->domain);
2483 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2485 pr_err("%s: failed to validate BO\n", __func__);
2490 /* Update mapping. If the BO was not validated
2491 * (because we couldn't get user pages), this will
2492 * clear the page table entries, which will result in
2493 * VM faults if the GPU tries to access the invalid
2496 list_for_each_entry(attachment, &mem->attachments, list) {
2497 if (!attachment->is_mapped)
2500 kfd_mem_dmaunmap_attachment(mem, attachment);
2501 ret = update_gpuvm_pte(mem, attachment, &sync);
2503 pr_err("%s: update PTE failed\n", __func__);
2504 /* make sure this gets validated again */
2505 mutex_lock(&process_info->notifier_lock);
2507 mutex_unlock(&process_info->notifier_lock);
2513 /* Update page directories */
2514 ret = process_update_pds(process_info, &sync);
2517 ttm_eu_backoff_reservation(&ticket, &resv_list);
2518 amdgpu_sync_wait(&sync, false);
2519 amdgpu_sync_free(&sync);
2521 kfree(pd_bo_list_entries);
2527 /* Confirm that all user pages are valid while holding the notifier lock
2529 * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
2531 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
2533 struct kgd_mem *mem, *tmp_mem;
2536 list_for_each_entry_safe(mem, tmp_mem,
2537 &process_info->userptr_inval_list,
2538 validate_list.head) {
2539 bool valid = amdgpu_ttm_tt_get_user_pages_done(
2540 mem->bo->tbo.ttm, mem->range);
2544 WARN(!mem->invalid, "Invalid BO not marked invalid");
2548 WARN(mem->invalid, "Valid BO is marked invalid");
2550 list_move_tail(&mem->validate_list.head,
2551 &process_info->userptr_valid_list);
2557 /* Worker callback to restore evicted userptr BOs
2559 * Tries to update and validate all userptr BOs. If successful and no
2560 * concurrent evictions happened, the queues are restarted. Otherwise,
2561 * reschedule for another attempt later.
2563 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2565 struct delayed_work *dwork = to_delayed_work(work);
2566 struct amdkfd_process_info *process_info =
2567 container_of(dwork, struct amdkfd_process_info,
2568 restore_userptr_work);
2569 struct task_struct *usertask;
2570 struct mm_struct *mm;
2571 uint32_t evicted_bos;
2573 mutex_lock(&process_info->notifier_lock);
2574 evicted_bos = process_info->evicted_bos;
2575 mutex_unlock(&process_info->notifier_lock);
2579 /* Reference task and mm in case of concurrent process termination */
2580 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2583 mm = get_task_mm(usertask);
2585 put_task_struct(usertask);
2589 mutex_lock(&process_info->lock);
2591 if (update_invalid_user_pages(process_info, mm))
2593 /* userptr_inval_list can be empty if all evicted userptr BOs
2594 * have been freed. In that case there is nothing to validate
2595 * and we can just restart the queues.
2597 if (!list_empty(&process_info->userptr_inval_list)) {
2598 if (validate_invalid_user_pages(process_info))
2601 /* Final check for concurrent eviction and atomic update. If
2602 * another eviction happens after successful update, it will
2603 * be a first eviction that calls quiesce_mm. The eviction
2604 * reference counting inside KFD will handle this case.
2606 mutex_lock(&process_info->notifier_lock);
2607 if (process_info->evicted_bos != evicted_bos)
2608 goto unlock_notifier_out;
2610 if (confirm_valid_user_pages_locked(process_info)) {
2611 WARN(1, "User pages unexpectedly invalid");
2612 goto unlock_notifier_out;
2615 process_info->evicted_bos = evicted_bos = 0;
2617 if (kgd2kfd_resume_mm(mm)) {
2618 pr_err("%s: Failed to resume KFD\n", __func__);
2619 /* No recovery from this failure. Probably the CP is
2620 * hanging. No point trying again.
2624 unlock_notifier_out:
2625 mutex_unlock(&process_info->notifier_lock);
2627 mutex_unlock(&process_info->lock);
2629 /* If validation failed, reschedule another attempt */
2631 schedule_delayed_work(&process_info->restore_userptr_work,
2632 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2634 kfd_smi_event_queue_restore_rescheduled(mm);
2637 put_task_struct(usertask);
2640 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2641 * KFD process identified by process_info
2643 * @process_info: amdkfd_process_info of the KFD process
2645 * After memory eviction, the restore thread calls this function. It
2646 * should be called while the process is still valid. BO restore involves:
2648 * 1. Release old eviction fence and create new one
2649 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_bo_list.
2650 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2651 * BOs that need to be reserved.
2652 * 4. Reserve all the BOs
2653 * 5. Validate PD and PT BOs.
2654 * 6. Validate all KFD BOs using kfd_bo_list, map them, and add a new fence
2655 * 7. Add fence to all PD and PT BOs.
2656 * 8. Unreserve all BOs
2658 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2660 struct amdgpu_bo_list_entry *pd_bo_list;
2661 struct amdkfd_process_info *process_info = info;
2662 struct amdgpu_vm *peer_vm;
2663 struct kgd_mem *mem;
2664 struct bo_vm_reservation_context ctx;
2665 struct amdgpu_amdkfd_fence *new_fence;
2667 struct list_head duplicate_save;
2668 struct amdgpu_sync sync_obj;
2669 unsigned long failed_size = 0;
2670 unsigned long total_size = 0;
2672 INIT_LIST_HEAD(&duplicate_save);
2673 INIT_LIST_HEAD(&ctx.list);
2674 INIT_LIST_HEAD(&ctx.duplicates);
2676 pd_bo_list = kcalloc(process_info->n_vms,
2677 sizeof(struct amdgpu_bo_list_entry),
2683 mutex_lock(&process_info->lock);
2684 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2686 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2688 /* Reserve all BOs and page tables/directory. Add all BOs from
2689 * kfd_bo_list to ctx.list
2691 list_for_each_entry(mem, &process_info->kfd_bo_list,
2692 validate_list.head) {
2694 list_add_tail(&mem->resv_list.head, &ctx.list);
2695 mem->resv_list.bo = mem->validate_list.bo;
2696 mem->resv_list.num_shared = mem->validate_list.num_shared;
2699 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2700 false, &duplicate_save);
2702 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2703 goto ttm_reserve_fail;
2706 amdgpu_sync_create(&sync_obj);
2708 /* Validate PDs and PTs */
2709 ret = process_validate_vms(process_info);
2711 goto validate_map_fail;
2713 ret = process_sync_pds_resv(process_info, &sync_obj);
2715 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2716 goto validate_map_fail;
2719 /* Validate BOs and map them to GPUVM (update VM page tables). */
2720 list_for_each_entry(mem, &process_info->kfd_bo_list,
2721 validate_list.head) {
2723 struct amdgpu_bo *bo = mem->bo;
2724 uint32_t domain = mem->domain;
2725 struct kfd_mem_attachment *attachment;
2726 struct dma_resv_iter cursor;
2727 struct dma_fence *fence;
2729 total_size += amdgpu_bo_size(bo);
2731 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2733 pr_debug("Memory eviction: Validate BOs failed\n");
2734 failed_size += amdgpu_bo_size(bo);
2735 ret = amdgpu_amdkfd_bo_validate(bo,
2736 AMDGPU_GEM_DOMAIN_GTT, false);
2738 pr_debug("Memory eviction: Try again\n");
2739 goto validate_map_fail;
2742 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2743 DMA_RESV_USAGE_KERNEL, fence) {
2744 ret = amdgpu_sync_fence(&sync_obj, fence);
2746 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2747 goto validate_map_fail;
2750 list_for_each_entry(attachment, &mem->attachments, list) {
2751 if (!attachment->is_mapped)
2754 kfd_mem_dmaunmap_attachment(mem, attachment);
2755 ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2757 pr_debug("Memory eviction: update PTE failed. Try again\n");
2758 goto validate_map_fail;
2764 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2766 /* Update page directories */
2767 ret = process_update_pds(process_info, &sync_obj);
2769 pr_debug("Memory eviction: update PDs failed. Try again\n");
2770 goto validate_map_fail;
2773 /* Wait for validate and PT updates to finish */
2774 amdgpu_sync_wait(&sync_obj, false);
2776 /* Release the old eviction fence and create a new one, because a fence
2777 * only goes from unsignaled to signaled and therefore cannot be reused.
2778 * Use the context and mm from the old fence.
2780 new_fence = amdgpu_amdkfd_fence_create(
2781 process_info->eviction_fence->base.context,
2782 process_info->eviction_fence->mm,
2785 pr_err("Failed to create eviction fence\n");
2787 goto validate_map_fail;
2789 dma_fence_put(&process_info->eviction_fence->base);
2790 process_info->eviction_fence = new_fence;
2791 *ef = dma_fence_get(&new_fence->base);
2793 /* Attach new eviction fence to all BOs except pinned ones */
2794 list_for_each_entry(mem, &process_info->kfd_bo_list,
2795 validate_list.head) {
2796 if (mem->bo->tbo.pin_count)
2799 dma_resv_add_fence(mem->bo->tbo.base.resv,
2800 &process_info->eviction_fence->base,
2801 DMA_RESV_USAGE_BOOKKEEP);
2803 /* Attach eviction fence to PD / PT BOs */
2804 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2806 struct amdgpu_bo *bo = peer_vm->root.bo;
2808 dma_resv_add_fence(bo->tbo.base.resv,
2809 &process_info->eviction_fence->base,
2810 DMA_RESV_USAGE_BOOKKEEP);
2814 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2815 amdgpu_sync_free(&sync_obj);
2817 mutex_unlock(&process_info->lock);
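/*
 * Illustrative only: a hedged sketch of how a restore path might call the
 * function above and take ownership of the returned eviction fence
 * reference. example_restore_process() is a hypothetical wrapper; the real
 * caller lives in the KFD process code.
 */
static int example_restore_process(struct amdkfd_process_info *process_info)
{
	struct dma_fence *ef = NULL;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
	if (ret)
		return ret;

	/* The caller owns one reference to the new eviction fence and is
	 * responsible for dropping it when it is no longer needed.
	 */
	dma_fence_put(ef);
	return 0;
}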
2822 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2824 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2825 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2831 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2835 mutex_init(&(*mem)->lock);
2836 INIT_LIST_HEAD(&(*mem)->attachments);
2837 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2838 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2839 (*mem)->process_info = process_info;
2840 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2841 amdgpu_sync_create(&(*mem)->sync);
2844 /* Validate gws bo the first time it is added to process */
2845 mutex_lock(&(*mem)->process_info->lock);
2846 ret = amdgpu_bo_reserve(gws_bo, false);
2847 if (unlikely(ret)) {
2848 pr_err("Reserve gws bo failed %d\n", ret);
2849 goto bo_reservation_failure;
2852 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2854 pr_err("GWS BO validate failed %d\n", ret);
2855 goto bo_validation_failure;
2857 /* The GWS resource is shared between amdgpu and amdkfd.
2858 * Add the process eviction fence to the BO so they can
2861 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
2863 goto reserve_shared_fail;
2864 dma_resv_add_fence(gws_bo->tbo.base.resv,
2865 &process_info->eviction_fence->base,
2866 DMA_RESV_USAGE_BOOKKEEP);
2867 amdgpu_bo_unreserve(gws_bo);
2868 mutex_unlock(&(*mem)->process_info->lock);
2872 reserve_shared_fail:
2873 bo_validation_failure:
2874 amdgpu_bo_unreserve(gws_bo);
2875 bo_reservation_failure:
2876 mutex_unlock(&(*mem)->process_info->lock);
2877 amdgpu_sync_free(&(*mem)->sync);
2878 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2879 amdgpu_bo_unref(&gws_bo);
2880 mutex_destroy(&(*mem)->lock);
2886 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2889 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2890 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2891 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2893 /* Remove BO from process's validate list so restore worker won't touch
2896 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2898 ret = amdgpu_bo_reserve(gws_bo, false);
2899 if (unlikely(ret)) {
2900 pr_err("Reserve gws bo failed %d\n", ret);
2901 //TODO add BO back to validate_list?
2904 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2905 process_info->eviction_fence);
2906 amdgpu_bo_unreserve(gws_bo);
2907 amdgpu_sync_free(&kgd_mem->sync);
2908 amdgpu_bo_unref(&gws_bo);
2909 mutex_destroy(&kgd_mem->lock);
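/*
 * Illustrative only: a hedged sketch of attaching and later detaching a GWS
 * BO for a process using the two functions above. example_gws_round_trip()
 * is a hypothetical helper; the real callers live in the KFD process/queue
 * management code.
 */
static int example_gws_round_trip(struct amdkfd_process_info *process_info,
				  struct amdgpu_bo *gws_bo)
{
	struct kgd_mem *gws_mem;
	int ret;

	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem);
	if (ret)
		return ret;

	/* ... queues may now be set up to use the GWS resource ... */

	return amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
}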
2914 /* Returns GPU-specific tiling mode information */
2915 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
2916 struct tile_config *config)
2918 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2919 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2920 config->num_tile_configs =
2921 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2922 config->macro_tile_config_ptr =
2923 adev->gfx.config.macrotile_mode_array;
2924 config->num_macro_tile_configs =
2925 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2927 /* Those values are not set from GFX9 onwards */
2928 config->num_banks = adev->gfx.config.num_banks;
2929 config->num_ranks = adev->gfx.config.num_ranks;
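/*
 * Illustrative only: a hedged sketch of querying the tiling configuration,
 * assuming a zero return on success. example_dump_tile_config() is a
 * hypothetical helper; only fields filled in by the function above are read.
 */
static void example_dump_tile_config(struct amdgpu_device *adev)
{
	struct tile_config cfg = {};

	if (!amdgpu_amdkfd_get_tile_config(adev, &cfg))
		pr_debug("gb_addr_config 0x%x, %u tile configs, %u macro tile configs\n",
			 cfg.gb_addr_config, cfg.num_tile_configs,
			 cfg.num_macro_tile_configs);
}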
2934 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
2936 struct kfd_mem_attachment *entry;
2938 list_for_each_entry(entry, &mem->attachments, list) {
2939 if (entry->is_mapped && entry->adev == adev)
2945 #if defined(CONFIG_DEBUG_FS)
2947 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
2950 spin_lock(&kfd_mem_limit.mem_limit_lock);
2951 seq_printf(m, "System mem used %lldM out of %lluM\n",
2952 (kfd_mem_limit.system_mem_used >> 20),
2953 (kfd_mem_limit.max_system_mem_limit >> 20));
2954 seq_printf(m, "TTM mem used %lldM out of %lluM\n",
2955 (kfd_mem_limit.ttm_mem_used >> 20),
2956 (kfd_mem_limit.max_ttm_mem_limit >> 20));
2957 spin_unlock(&kfd_mem_limit.mem_limit_lock);