2 * Copyright 2009 Jerome Glisse.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/swiotlb.h>
42 #include <linux/dma-buf.h>
43 #include <linux/sizes.h>
44 #include <linux/module.h>
46 #include <drm/drm_drv.h>
47 #include <drm/ttm/ttm_bo_api.h>
48 #include <drm/ttm/ttm_bo_driver.h>
49 #include <drm/ttm/ttm_placement.h>
50 #include <drm/ttm/ttm_range_manager.h>
52 #include <drm/amdgpu_drm.h>
56 #include "amdgpu_object.h"
57 #include "amdgpu_trace.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_sdma.h"
60 #include "amdgpu_ras.h"
61 #include "amdgpu_hmm.h"
62 #include "amdgpu_atomfirmware.h"
63 #include "amdgpu_res_cursor.h"
64 #include "bif/bif_4_1_d.h"
66 MODULE_IMPORT_NS(DMA_BUF);
68 #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
70 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
72 struct ttm_resource *bo_mem);
73 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
76 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
78 uint64_t size_in_page)
80 return ttm_range_man_init(&adev->mman.bdev, type,
85 * amdgpu_evict_flags - Compute placement flags
87 * @bo: The buffer object to evict
88 * @placement: Possible destination(s) for evicted BO
90 * Fill in placement data when ttm_bo_evict() is called
92 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
93 struct ttm_placement *placement)
95 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
96 struct amdgpu_bo *abo;
97 static const struct ttm_place placements = {
100 .mem_type = TTM_PL_SYSTEM,
104 /* Don't handle scatter gather BOs */
105 if (bo->type == ttm_bo_type_sg) {
106 placement->num_placement = 0;
107 placement->num_busy_placement = 0;
111 /* Object isn't an AMDGPU object so ignore */
112 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
113 placement->placement = &placements;
114 placement->busy_placement = &placements;
115 placement->num_placement = 1;
116 placement->num_busy_placement = 1;
120 abo = ttm_to_amdgpu_bo(bo);
121 if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
122 placement->num_placement = 0;
123 placement->num_busy_placement = 0;
127 switch (bo->resource->mem_type) {
131 placement->num_placement = 0;
132 placement->num_busy_placement = 0;
136 if (!adev->mman.buffer_funcs_enabled) {
137 /* Move to system memory */
138 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
139 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
140 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
141 amdgpu_bo_in_cpu_visible_vram(abo)) {
143 /* Try evicting to the CPU inaccessible part of VRAM
144 * first, but only set GTT as busy placement, so this
145 * BO will be evicted to GTT rather than causing other
146 * BOs to be evicted from VRAM
148 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
149 AMDGPU_GEM_DOMAIN_GTT |
150 AMDGPU_GEM_DOMAIN_CPU);
151 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
152 abo->placements[0].lpfn = 0;
153 abo->placement.busy_placement = &abo->placements[1];
154 abo->placement.num_busy_placement = 1;
156 /* Move to GTT memory */
157 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
158 AMDGPU_GEM_DOMAIN_CPU);
162 case AMDGPU_PL_PREEMPT:
164 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
167 *placement = abo->placement;
171 * amdgpu_ttm_map_buffer - Map memory into the GART windows
172 * @bo: buffer object to map
173 * @mem: memory object to map
174 * @mm_cur: range to map
175 * @window: which GART window to use
176 * @ring: DMA ring to use for the copy
177 * @tmz: if we should set up a TMZ-enabled mapping
178 * @size: in number of bytes to map, out number of bytes mapped
179 * @addr: resulting address inside the MC address space
181 * Set up one of the GART windows to access a specific piece of memory or return
182 * the physical address for local memory.
184 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
185 struct ttm_resource *mem,
186 struct amdgpu_res_cursor *mm_cur,
187 unsigned window, struct amdgpu_ring *ring,
188 bool tmz, uint64_t *size, uint64_t *addr)
190 struct amdgpu_device *adev = ring->adev;
191 unsigned offset, num_pages, num_dw, num_bytes;
192 uint64_t src_addr, dst_addr;
193 struct amdgpu_job *job;
199 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
200 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
202 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
205 /* Map only what can't be accessed directly */
206 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
207 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
214 * If start begins at an offset inside the page, then adjust the size
215 * and addr accordingly
217 offset = mm_cur->start & ~PAGE_MASK;
219 num_pages = PFN_UP(*size + offset);
220 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
222 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
224 *addr = adev->gmc.gart_start;
225 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
226 AMDGPU_GPU_PAGE_SIZE;
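/*
 * Illustrative example (assumed values; 4 KiB pages and an assumed
 * AMDGPU_GTT_MAX_TRANSFER_SIZE of 512 pages): for mm_cur->start = 0x1000234
 * and a requested *size of 8 KiB, offset = 0x234, num_pages = PFN_UP(0x2234)
 * = 3 and *size stays 8 KiB because 3 * 4 KiB - 0x234 is larger. With
 * window = 1, *addr starts at gart_start plus one full transfer window
 * (512 GPU pages) into the GART aperture.
 */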
229 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
230 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
232 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
233 AMDGPU_FENCE_OWNER_UNDEFINED,
234 num_dw * 4 + num_bytes,
235 AMDGPU_IB_POOL_DELAYED, &job);
239 src_addr = num_dw * 4;
240 src_addr += job->ibs[0].gpu_addr;
242 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
243 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
244 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
245 dst_addr, num_bytes, false);
247 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
248 WARN_ON(job->ibs[0].length_dw > num_dw);
250 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
252 flags |= AMDGPU_PTE_TMZ;
254 cpu_addr = &job->ibs[0].ptr[num_dw];
256 if (mem->mem_type == TTM_PL_TT) {
257 dma_addr_t *dma_addr;
259 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
260 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
262 dma_addr_t dma_address;
264 dma_address = mm_cur->start;
265 dma_address += adev->vm_manager.vram_base_offset;
267 for (i = 0; i < num_pages; ++i) {
268 amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
270 dma_address += PAGE_SIZE;
274 dma_fence_put(amdgpu_job_submit(job));
279 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
280 * @adev: amdgpu device
281 * @src: buffer/address where to read from
282 * @dst: buffer/address where to write to
283 * @size: number of bytes to copy
284 * @tmz: if a secure copy should be used
285 * @resv: resv object to sync to
286 * @f: Returns the last fence if multiple jobs are submitted.
288 * The function copies @size bytes from {src->mem + src->offset} to
289 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
290 * move and different for a BO to BO copy.
293 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
294 const struct amdgpu_copy_mem *src,
295 const struct amdgpu_copy_mem *dst,
296 uint64_t size, bool tmz,
297 struct dma_resv *resv,
298 struct dma_fence **f)
300 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
301 struct amdgpu_res_cursor src_mm, dst_mm;
302 struct dma_fence *fence = NULL;
305 if (!adev->mman.buffer_funcs_enabled) {
306 DRM_ERROR("Trying to move memory with ring turned off.\n");
310 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
311 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
313 mutex_lock(&adev->mman.gtt_window_lock);
314 while (src_mm.remaining) {
315 uint64_t from, to, cur_size;
316 struct dma_fence *next;
318 /* Never copy more than 256MiB at once to avoid a timeout */
319 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
321 /* Map src to window 0 and dst to window 1. */
322 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
323 0, ring, tmz, &cur_size, &from);
327 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
328 1, ring, tmz, &cur_size, &to);
332 r = amdgpu_copy_buffer(ring, from, to, cur_size,
333 resv, &next, false, true, tmz);
337 dma_fence_put(fence);
340 amdgpu_res_next(&src_mm, cur_size);
341 amdgpu_res_next(&dst_mm, cur_size);
344 mutex_unlock(&adev->mman.gtt_window_lock);
346 *f = dma_fence_get(fence);
347 dma_fence_put(fence);
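/*
 * Usage sketch (illustrative only; assumes the usual struct amdgpu_copy_mem
 * layout of { bo, mem, offset }). This mirrors how amdgpu_move_blit() below
 * drives the helper for a whole-BO move:
 *
 *	struct amdgpu_copy_mem src = { .bo = bo, .mem = old_mem, .offset = 0 };
 *	struct amdgpu_copy_mem dst = { .bo = bo, .mem = new_mem, .offset = 0 };
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size,
 *				       amdgpu_bo_encrypted(abo),
 *				       bo->base.resv, &fence);
 */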
352 * amdgpu_move_blit - Copy an entire buffer to another buffer
354 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
355 * help move buffers to and from VRAM.
357 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
359 struct ttm_resource *new_mem,
360 struct ttm_resource *old_mem)
362 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
363 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
364 struct amdgpu_copy_mem src, dst;
365 struct dma_fence *fence = NULL;
375 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
377 amdgpu_bo_encrypted(abo),
378 bo->base.resv, &fence);
382 /* clear the space being freed */
383 if (old_mem->mem_type == TTM_PL_VRAM &&
384 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
385 struct dma_fence *wipe_fence = NULL;
387 r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
390 } else if (wipe_fence) {
391 dma_fence_put(fence);
396 /* Always block for VM page tables before committing the new location */
397 if (bo->type == ttm_bo_type_kernel)
398 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
400 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
401 dma_fence_put(fence);
406 dma_fence_wait(fence, false);
407 dma_fence_put(fence);
412 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
414 * Called by amdgpu_bo_move()
416 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
417 struct ttm_resource *mem)
419 u64 mem_size = (u64)mem->size;
420 struct amdgpu_res_cursor cursor;
423 if (mem->mem_type == TTM_PL_SYSTEM ||
424 mem->mem_type == TTM_PL_TT)
426 if (mem->mem_type != TTM_PL_VRAM)
429 amdgpu_res_first(mem, 0, mem_size, &cursor);
430 end = cursor.start + cursor.size;
431 while (cursor.remaining) {
432 amdgpu_res_next(&cursor, cursor.size);
434 if (!cursor.remaining)
437 /* ttm_resource_ioremap only supports contiguous memory */
438 if (end != cursor.start)
441 end = cursor.start + cursor.size;
444 return end <= adev->gmc.visible_vram_size;
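/*
 * Illustrative example: a VRAM resource made of two non-adjacent blocks, say
 * [0, 64 MiB) and [128 MiB, 160 MiB), fails the end != cursor.start check
 * above even if both blocks lie below visible_vram_size, because
 * ttm_bo_move_memcpy()/ttm_resource_ioremap() need one contiguous range.
 */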
448 * amdgpu_bo_move - Move a buffer object to a new memory location
450 * Called by ttm_bo_handle_move_mem()
452 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
453 struct ttm_operation_ctx *ctx,
454 struct ttm_resource *new_mem,
455 struct ttm_place *hop)
457 struct amdgpu_device *adev;
458 struct amdgpu_bo *abo;
459 struct ttm_resource *old_mem = bo->resource;
462 if (new_mem->mem_type == TTM_PL_TT ||
463 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
464 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
469 /* Can't move a pinned BO */
470 abo = ttm_to_amdgpu_bo(bo);
471 if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
474 adev = amdgpu_ttm_adev(bo->bdev);
476 if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
478 ttm_bo_move_null(bo, new_mem);
481 if (old_mem->mem_type == TTM_PL_SYSTEM &&
482 (new_mem->mem_type == TTM_PL_TT ||
483 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
484 ttm_bo_move_null(bo, new_mem);
487 if ((old_mem->mem_type == TTM_PL_TT ||
488 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
489 new_mem->mem_type == TTM_PL_SYSTEM) {
490 r = ttm_bo_wait_ctx(bo, ctx);
494 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
495 ttm_resource_free(bo, &bo->resource);
496 ttm_bo_assign_mem(bo, new_mem);
500 if (old_mem->mem_type == AMDGPU_PL_GDS ||
501 old_mem->mem_type == AMDGPU_PL_GWS ||
502 old_mem->mem_type == AMDGPU_PL_OA ||
503 new_mem->mem_type == AMDGPU_PL_GDS ||
504 new_mem->mem_type == AMDGPU_PL_GWS ||
505 new_mem->mem_type == AMDGPU_PL_OA) {
506 /* Nothing to save here */
507 ttm_bo_move_null(bo, new_mem);
511 if (bo->type == ttm_bo_type_device &&
512 new_mem->mem_type == TTM_PL_VRAM &&
513 old_mem->mem_type != TTM_PL_VRAM) {
514 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
515 * accesses the BO after it's moved.
517 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
520 if (adev->mman.buffer_funcs_enabled) {
521 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
522 new_mem->mem_type == TTM_PL_VRAM) ||
523 (old_mem->mem_type == TTM_PL_VRAM &&
524 new_mem->mem_type == TTM_PL_SYSTEM))) {
527 hop->mem_type = TTM_PL_TT;
528 hop->flags = TTM_PL_FLAG_TEMPORARY;
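/* A direct SYSTEM <-> VRAM blit is not possible here, so the move is split
 * into two hops: TTM retries the move through this temporary GTT placement
 * (e.g. SYSTEM -> TT -> VRAM) and calls back into amdgpu_bo_move() for each
 * leg.
 */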
532 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
538 /* Check that all memory is CPU accessible */
539 if (!amdgpu_mem_visible(adev, old_mem) ||
540 !amdgpu_mem_visible(adev, new_mem)) {
541 pr_err("Move buffer fallback to memcpy unavailable\n");
545 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
551 /* update statistics */
552 atomic64_add(bo->base.size, &adev->num_bytes_moved);
553 amdgpu_bo_move_notify(bo, evict, new_mem);
558 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
560 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
562 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
563 struct ttm_resource *mem)
565 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
566 size_t bus_size = (size_t)mem->size;
568 switch (mem->mem_type) {
573 case AMDGPU_PL_PREEMPT:
576 mem->bus.offset = mem->start << PAGE_SHIFT;
577 /* check if it's visible */
578 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
581 if (adev->mman.aper_base_kaddr &&
582 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
583 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
586 mem->bus.offset += adev->gmc.aper_base;
587 mem->bus.is_iomem = true;
595 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
596 unsigned long page_offset)
598 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
599 struct amdgpu_res_cursor cursor;
601 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
603 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
607 * amdgpu_ttm_domain_start - Returns GPU start address
608 * @adev: amdgpu device object
609 * @type: type of the memory
612 * GPU start address of a memory domain
615 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
619 return adev->gmc.gart_start;
621 return adev->gmc.vram_start;
628 * TTM backend functions.
630 struct amdgpu_ttm_tt {
632 struct drm_gem_object *gobj;
635 struct task_struct *usertask;
640 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
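/*
 * struct amdgpu_ttm_tt embeds TTM's generic struct ttm_tt as its "ttm"
 * member, so a plain ttm_tt pointer handed back by TTM is converted to the
 * driver wrapper with container_of(), e.g.:
 *
 *	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
 */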
642 #ifdef CONFIG_DRM_AMDGPU_USERPTR
644 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
645 * memory and start HMM tracking of CPU page table updates
647 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
648 * once afterwards to stop HMM tracking
650 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
651 struct hmm_range **range)
653 struct ttm_tt *ttm = bo->tbo.ttm;
654 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
655 unsigned long start = gtt->userptr;
656 struct vm_area_struct *vma;
657 struct mm_struct *mm;
661 /* Make sure get_user_pages_done() can clean up gracefully */
664 mm = bo->notifier.mm;
666 DRM_DEBUG_DRIVER("BO is not registered?\n");
670 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
674 vma = vma_lookup(mm, start);
675 if (unlikely(!vma)) {
679 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
685 readonly = amdgpu_ttm_tt_is_readonly(ttm);
686 r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
687 readonly, NULL, pages, range);
689 mmap_read_unlock(mm);
691 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
698 /* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
700 void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
701 struct hmm_range *range)
703 struct amdgpu_ttm_tt *gtt = (void *)ttm;
705 if (gtt && gtt->userptr && range)
706 amdgpu_hmm_range_get_pages_done(range);
710 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
711 * Check if the pages backing this ttm range have been invalidated
713 * Returns: true if pages are still valid
715 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
716 struct hmm_range *range)
718 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
720 if (!gtt || !gtt->userptr || !range)
723 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
724 gtt->userptr, ttm->num_pages);
726 WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
728 return !amdgpu_hmm_range_get_pages_done(range);
733 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
735 * Called by amdgpu_cs_list_validate(). This creates the page list
736 * that backs user memory and will ultimately be mapped into the device
739 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
743 for (i = 0; i < ttm->num_pages; ++i)
744 ttm->pages[i] = pages ? pages[i] : NULL;
748 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
750 * Called by amdgpu_ttm_backend_bind()
752 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
755 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
756 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
757 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
758 enum dma_data_direction direction = write ?
759 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
762 /* Allocate an SG array and squash pages into it */
763 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
764 (u64)ttm->num_pages << PAGE_SHIFT,
769 /* Map SG to device */
770 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
774 /* convert SG to linear array of pages and dma addresses */
775 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
787 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
789 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
792 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
793 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
794 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
795 enum dma_data_direction direction = write ?
796 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
798 /* double check that we don't free the table twice */
799 if (!ttm->sg || !ttm->sg->sgl)
802 /* unmap the pages mapped to the device */
803 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
804 sg_free_table(ttm->sg);
807 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
808 struct ttm_buffer_object *tbo,
811 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
812 struct ttm_tt *ttm = tbo->ttm;
813 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
815 if (amdgpu_bo_encrypted(abo))
816 flags |= AMDGPU_PTE_TMZ;
818 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
819 uint64_t page_idx = 1;
821 amdgpu_gart_bind(adev, gtt->offset, page_idx,
822 gtt->ttm.dma_address, flags);
824 /* The memory type of the first page defaults to UC. Now
825 * modify the memory type to NC from the second page of
828 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
829 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
831 amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
832 ttm->num_pages - page_idx,
833 &(gtt->ttm.dma_address[page_idx]), flags);
835 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
836 gtt->ttm.dma_address, flags);
841 * amdgpu_ttm_backend_bind - Bind GTT memory
843 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
844 * This handles binding GTT memory to the device address space.
846 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
848 struct ttm_resource *bo_mem)
850 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
851 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
862 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
864 DRM_ERROR("failed to pin userptr\n");
867 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
869 struct dma_buf_attachment *attach;
870 struct sg_table *sgt;
872 attach = gtt->gobj->import_attach;
873 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
880 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
884 if (!ttm->num_pages) {
885 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
886 ttm->num_pages, bo_mem, ttm);
889 if (bo_mem->mem_type != TTM_PL_TT ||
890 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
891 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
895 /* compute PTE flags relevant to this BO memory */
896 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
898 /* bind pages into GART page tables */
899 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
900 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
901 gtt->ttm.dma_address, flags);
907 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
908 * through AGP or GART aperture.
910 * If bo is accessible through AGP aperture, then use AGP aperture
911 * to access bo; otherwise allocate logical space in GART aperture
912 * and map bo to GART aperture.
914 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
916 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
917 struct ttm_operation_ctx ctx = { false, false };
918 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
919 struct ttm_placement placement;
920 struct ttm_place placements;
921 struct ttm_resource *tmp;
922 uint64_t addr, flags;
925 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
928 addr = amdgpu_gmc_agp_addr(bo);
929 if (addr != AMDGPU_BO_INVALID_OFFSET) {
930 bo->resource->start = addr >> PAGE_SHIFT;
934 /* allocate GART space */
935 placement.num_placement = 1;
936 placement.placement = &placements;
937 placement.num_busy_placement = 1;
938 placement.busy_placement = &placements;
940 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
941 placements.mem_type = TTM_PL_TT;
942 placements.flags = bo->resource->placement;
944 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
948 /* compute PTE flags for this buffer object */
949 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
952 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
953 amdgpu_ttm_gart_bind(adev, bo, flags);
954 amdgpu_gart_invalidate_tlb(adev);
955 ttm_resource_free(bo, &bo->resource);
956 ttm_bo_assign_mem(bo, tmp);
962 * amdgpu_ttm_recover_gart - Rebind GTT pages
964 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
965 * rebind GTT pages during a GPU reset.
967 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
969 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
975 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
976 amdgpu_ttm_gart_bind(adev, tbo, flags);
980 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
982 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
985 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
988 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
989 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
991 /* if the pages have userptr pinning then clear that first */
993 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
994 } else if (ttm->sg && gtt->gobj->import_attach) {
995 struct dma_buf_attachment *attach;
997 attach = gtt->gobj->import_attach;
998 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1005 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1008 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1009 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1013 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1016 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1019 put_task_struct(gtt->usertask);
1021 ttm_tt_fini(&gtt->ttm);
1026 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1028 * @bo: The buffer object to create a GTT ttm_tt object around
1029 * @page_flags: Page flags to be added to the ttm_tt object
1031 * Called by ttm_tt_create().
1033 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1034 uint32_t page_flags)
1036 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1037 struct amdgpu_ttm_tt *gtt;
1038 enum ttm_caching caching;
1040 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1044 gtt->gobj = &bo->base;
1046 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1047 caching = ttm_write_combined;
1049 caching = ttm_cached;
1051 /* allocate space for the uninitialized page entries */
1052 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1060 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1062 * Map the pages of a ttm_tt object to an address space visible
1063 * to the underlying device.
1065 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1067 struct ttm_operation_ctx *ctx)
1069 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1070 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1074 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1076 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1082 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1085 ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1089 for (i = 0; i < ttm->num_pages; ++i)
1090 ttm->pages[i]->mapping = bdev->dev_mapping;
1096 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1098 * Unmaps pages of a ttm_tt object from the device address space and
1099 * unpopulates the page array backing it.
1101 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1104 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1105 struct amdgpu_device *adev;
1108 amdgpu_ttm_backend_unbind(bdev, ttm);
1111 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1117 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1120 for (i = 0; i < ttm->num_pages; ++i)
1121 ttm->pages[i]->mapping = NULL;
1123 adev = amdgpu_ttm_adev(bdev);
1124 return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1128 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1131 * @tbo: The ttm_buffer_object that contains the userptr
1132 * @user_addr: The returned value
1134 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1135 uint64_t *user_addr)
1137 struct amdgpu_ttm_tt *gtt;
1142 gtt = (void *)tbo->ttm;
1143 *user_addr = gtt->userptr;
1148 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1151 * @bo: The ttm_buffer_object to bind this userptr to
1152 * @addr: The address in the current tasks VM space to use
1153 * @flags: Requirements of userptr object.
1155 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1156 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1157 * initialize GPU VM for a KFD process.
1159 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1160 uint64_t addr, uint32_t flags)
1162 struct amdgpu_ttm_tt *gtt;
1165 /* TODO: We want a separate TTM object type for userptrs */
1166 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1167 if (bo->ttm == NULL)
1171 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1172 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1174 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1175 gtt->userptr = addr;
1176 gtt->userflags = flags;
1179 put_task_struct(gtt->usertask);
1180 gtt->usertask = current->group_leader;
1181 get_task_struct(gtt->usertask);
1187 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1189 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1191 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1196 if (gtt->usertask == NULL)
1199 return gtt->usertask->mm;
1203 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1204 * address range for the current task.
1207 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1208 unsigned long end, unsigned long *userptr)
1210 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1213 if (gtt == NULL || !gtt->userptr)
1216 /* Return false if no part of the ttm_tt object lies within
1219 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1220 if (gtt->userptr > end || gtt->userptr + size <= start)
1224 *userptr = gtt->userptr;
1229 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1231 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1233 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1235 if (gtt == NULL || !gtt->userptr)
1242 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1244 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1246 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1251 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1255 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1257 * @ttm: The ttm_tt object to compute the flags for
1258 * @mem: The memory registry backing this ttm_tt object
1260 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1262 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1266 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1267 flags |= AMDGPU_PTE_VALID;
1269 if (mem && (mem->mem_type == TTM_PL_TT ||
1270 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1271 flags |= AMDGPU_PTE_SYSTEM;
1273 if (ttm->caching == ttm_cached)
1274 flags |= AMDGPU_PTE_SNOOPED;
1277 if (mem && mem->mem_type == TTM_PL_VRAM &&
1278 mem->bus.caching == ttm_cached)
1279 flags |= AMDGPU_PTE_SNOOPED;
1285 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1287 * @adev: amdgpu_device pointer
1288 * @ttm: The ttm_tt object to compute the flags for
1289 * @mem: The memory registry backing this ttm_tt object
1291 * Figure out the flags to use for a VM PTE (Page Table Entry).
1293 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1294 struct ttm_resource *mem)
1296 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1298 flags |= adev->gart.gart_pte_flags;
1299 flags |= AMDGPU_PTE_READABLE;
1301 if (!amdgpu_ttm_tt_is_readonly(ttm))
1302 flags |= AMDGPU_PTE_WRITEABLE;
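/*
 * Illustrative example (not exhaustive): for a writable, cache-coherent
 * GTT-resident ttm_tt the PDE bits above contribute AMDGPU_PTE_VALID |
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, and this function adds the
 * per-ASIC gart_pte_flags plus AMDGPU_PTE_READABLE and AMDGPU_PTE_WRITEABLE.
 */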
1308 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1311 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1312 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1313 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1314 * used to clean out a memory space.
1316 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1317 const struct ttm_place *place)
1319 struct dma_resv_iter resv_cursor;
1320 struct dma_fence *f;
1322 if (!amdgpu_bo_is_amdgpu_bo(bo))
1323 return ttm_bo_eviction_valuable(bo, place);
1326 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1329 if (bo->type == ttm_bo_type_kernel &&
1330 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1333 /* If bo is a KFD BO, check if the bo belongs to the current process.
1334 * If true, then return false as any KFD process needs all its BOs to
1335 * be resident to run successfully
1337 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1338 DMA_RESV_USAGE_BOOKKEEP, f) {
1339 if (amdkfd_fence_check_mm(f, current->mm))
1343 /* Preemptible BOs don't own system resources managed by the
1344 * driver (pages, VRAM, GART space). They point to resources
1345 * owned by someone else (e.g. pageable memory in user mode
1346 * or a DMABuf). They are used in a preemptible context so we
1347 * can guarantee no deadlocks and good QoS in case of MMU
1348 * notifiers or DMABuf move notifiers from the resource owner.
1350 if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1353 if (bo->resource->mem_type == TTM_PL_TT &&
1354 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1357 return ttm_bo_eviction_valuable(bo, place);
1360 static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1361 void *buf, size_t size, bool write)
1364 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1365 uint64_t bytes = 4 - (pos & 0x3);
1366 uint32_t shift = (pos & 0x3) * 8;
1367 uint32_t mask = 0xffffffff << shift;
1371 mask &= 0xffffffff >> (bytes - size) * 8;
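/*
 * Worked example: for a pos ending in ...2 with size = 1, aligned_pos =
 * pos - 2, bytes = 2, shift = 16 and mask = 0xffff0000; because only one
 * byte is requested the mask is narrowed to 0x00ff0000, so the
 * read-modify-write below touches just that byte of the aligned dword.
 */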
1375 if (mask != 0xffffffff) {
1376 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1379 value |= (*(uint32_t *)buf << shift) & mask;
1380 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1382 value = (value & mask) >> shift;
1383 memcpy(buf, &value, bytes);
1386 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1395 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1396 unsigned long offset, void *buf,
1399 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1400 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1401 struct amdgpu_res_cursor src_mm;
1402 struct amdgpu_job *job;
1403 struct dma_fence *fence;
1404 uint64_t src_addr, dst_addr;
1405 unsigned int num_dw;
1408 if (len != PAGE_SIZE)
1411 if (!adev->mman.sdma_access_ptr)
1414 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1418 memcpy(adev->mman.sdma_access_ptr, buf, len);
1420 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1421 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
1422 AMDGPU_FENCE_OWNER_UNDEFINED,
1423 num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1428 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1429 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1431 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1433 swap(src_addr, dst_addr);
1435 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1438 amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1439 WARN_ON(job->ibs[0].length_dw > num_dw);
1441 fence = amdgpu_job_submit(job);
1443 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1445 dma_fence_put(fence);
1448 memcpy(buf, adev->mman.sdma_access_ptr, len);
1455 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1457 * @bo: The buffer object to read/write
1458 * @offset: Offset into buffer object
1459 * @buf: Secondary buffer to write/read from
1460 * @len: Length in bytes of access
1461 * @write: true if writing
1463 * This is used to access VRAM that backs a buffer object via MMIO
1464 * access for debugging purposes.
1466 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1467 unsigned long offset, void *buf, int len,
1470 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1471 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1472 struct amdgpu_res_cursor cursor;
1475 if (bo->resource->mem_type != TTM_PL_VRAM)
1478 if (amdgpu_device_has_timeouts_enabled(adev) &&
1479 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1482 amdgpu_res_first(bo->resource, offset, len, &cursor);
1483 while (cursor.remaining) {
1484 size_t count, size = cursor.size;
1485 loff_t pos = cursor.start;
1487 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1490 /* use MM access for the rest of VRAM and to handle unaligned addresses */
1493 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1498 amdgpu_res_next(&cursor, cursor.size);
1505 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1507 amdgpu_bo_move_notify(bo, false, NULL);
1510 static struct ttm_device_funcs amdgpu_bo_driver = {
1511 .ttm_tt_create = &amdgpu_ttm_tt_create,
1512 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1513 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1514 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1515 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1516 .evict_flags = &amdgpu_evict_flags,
1517 .move = &amdgpu_bo_move,
1518 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1519 .release_notify = &amdgpu_bo_release_notify,
1520 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1521 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1522 .access_memory = &amdgpu_ttm_access_memory,
1526 * Firmware Reservation functions
1529 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1531 * @adev: amdgpu_device pointer
1533 * free fw reserved vram if it has been reserved.
1535 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1537 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1538 NULL, &adev->mman.fw_vram_usage_va);
1542 * Driver Reservation functions
1545 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
1547 * @adev: amdgpu_device pointer
1549 * free drv reserved vram if it has been reserved.
1551 static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
1553 amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
1555 &adev->mman.drv_vram_usage_va);
1559 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1561 * @adev: amdgpu_device pointer
1563 * create bo vram reservation from fw.
1565 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1567 uint64_t vram_size = adev->gmc.visible_vram_size;
1569 adev->mman.fw_vram_usage_va = NULL;
1570 adev->mman.fw_vram_usage_reserved_bo = NULL;
1572 if (adev->mman.fw_vram_usage_size == 0 ||
1573 adev->mman.fw_vram_usage_size > vram_size)
1576 return amdgpu_bo_create_kernel_at(adev,
1577 adev->mman.fw_vram_usage_start_offset,
1578 adev->mman.fw_vram_usage_size,
1579 &adev->mman.fw_vram_usage_reserved_bo,
1580 &adev->mman.fw_vram_usage_va);
1584 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
1586 * @adev: amdgpu_device pointer
1588 * create bo vram reservation from drv.
1590 static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
1592 u64 vram_size = adev->gmc.visible_vram_size;
1594 adev->mman.drv_vram_usage_va = NULL;
1595 adev->mman.drv_vram_usage_reserved_bo = NULL;
1597 if (adev->mman.drv_vram_usage_size == 0 ||
1598 adev->mman.drv_vram_usage_size > vram_size)
1601 return amdgpu_bo_create_kernel_at(adev,
1602 adev->mman.drv_vram_usage_start_offset,
1603 adev->mman.drv_vram_usage_size,
1604 &adev->mman.drv_vram_usage_reserved_bo,
1605 &adev->mman.drv_vram_usage_va);
1609 * Memory training reservation functions
1613 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1615 * @adev: amdgpu_device pointer
1617 * free memory training reserved vram if it has been reserved.
1619 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1621 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1623 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1624 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1630 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1632 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1634 memset(ctx, 0, sizeof(*ctx));
1636 ctx->c2p_train_data_offset =
1637 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1638 ctx->p2c_train_data_offset =
1639 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1640 ctx->train_data_size =
1641 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1643 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1644 ctx->train_data_size,
1645 ctx->p2c_train_data_offset,
1646 ctx->c2p_train_data_offset);
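/*
 * Worked example with assumed numbers: for mc_vram_size = 8 GiB and a
 * discovery_tmr_size of 4 MiB, c2p_train_data_offset becomes
 * ALIGN(8 GiB - 4 MiB - 1 MiB, 1 MiB) = 8 GiB - 5 MiB, i.e. the C2P buffer
 * sits just below the TMR carve-out at the top of VRAM, while the P2C buffer
 * sits a fixed GDDR6_MEM_TRAINING_OFFSET below the top.
 */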
1650 * reserve TMR memory at the top of VRAM which holds
1651 * IP Discovery data and is protected by PSP.
1653 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1656 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1657 bool mem_train_support = false;
1659 if (!amdgpu_sriov_vf(adev)) {
1660 if (amdgpu_atomfirmware_mem_training_supported(adev))
1661 mem_train_support = true;
1663 DRM_DEBUG("memory training is not supported!\n");
1667 * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1668 * use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1670 * Otherwise, fall back to the legacy approach of checking and reserving TMR blocks for IP
1671 * discovery data and G6 memory training data respectively
1673 adev->mman.discovery_tmr_size =
1674 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1675 if (!adev->mman.discovery_tmr_size)
1676 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1678 if (mem_train_support) {
1679 /* reserve vram for mem train according to TMR location */
1680 amdgpu_ttm_training_data_block_init(adev);
1681 ret = amdgpu_bo_create_kernel_at(adev,
1682 ctx->c2p_train_data_offset,
1683 ctx->train_data_size,
1687 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1688 amdgpu_ttm_training_reserve_vram_fini(adev);
1691 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1694 ret = amdgpu_bo_create_kernel_at(adev,
1695 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1696 adev->mman.discovery_tmr_size,
1697 &adev->mman.discovery_memory,
1700 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1701 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1709 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1710 * gtt/vram related fields.
1712 * This initializes all of the memory space pools that the TTM layer
1713 * will need such as the GTT space (system memory mapped to the device),
1714 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1715 * can be mapped per VMID.
1717 int amdgpu_ttm_init(struct amdgpu_device *adev)
1723 mutex_init(&adev->mman.gtt_window_lock);
1725 /* No other users of the address space, so set it to 0 */
1726 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1727 adev_to_drm(adev)->anon_inode->i_mapping,
1728 adev_to_drm(adev)->vma_offset_manager,
1730 dma_addressing_limited(adev->dev));
1732 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1735 adev->mman.initialized = true;
1737 /* Initialize VRAM pool with all of VRAM divided into pages */
1738 r = amdgpu_vram_mgr_init(adev);
1740 DRM_ERROR("Failed initializing VRAM heap.\n");
1744 /* Reduce size of CPU-visible VRAM if requested */
1745 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1746 if (amdgpu_vis_vram_limit > 0 &&
1747 vis_vram_limit <= adev->gmc.visible_vram_size)
1748 adev->gmc.visible_vram_size = vis_vram_limit;
1750 /* Change the size here instead of the init above so only lpfn is affected */
1751 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1754 if (adev->gmc.xgmi.connected_to_cpu)
1755 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1756 adev->gmc.visible_vram_size);
1760 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1761 adev->gmc.visible_vram_size);
1765 * The reserved vram for firmware must be pinned to the specified
1766 * place on the VRAM, so reserve it early.
1768 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1774 * The reserved vram for driver must be pinned to the specified
1775 * place on the VRAM, so reserve it early.
1777 r = amdgpu_ttm_drv_reserve_vram_init(adev);
1782 * only NAVI10 and onwards ASICs support IP discovery.
1783 * If IP discovery is enabled, a block of memory should be
1784 * reserved for IP discovery.
1786 if (adev->mman.discovery_bin) {
1787 r = amdgpu_ttm_reserve_tmr(adev);
1792 /* allocate memory as required for VGA
1793 * This is used for VGA emulation and pre-OS scanout buffers to
1794 * avoid display artifacts while transitioning between pre-OS
1796 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1797 &adev->mman.stolen_vga_memory,
1801 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1802 adev->mman.stolen_extended_size,
1803 &adev->mman.stolen_extended_memory,
1807 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1808 adev->mman.stolen_reserved_size,
1809 &adev->mman.stolen_reserved_memory,
1814 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1815 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1817 /* Compute GTT size, either based on 1/2 the size of RAM
1818 * or whatever the user passed on module init */
1819 if (amdgpu_gtt_size == -1) {
1823 /* Certain GL unit tests for large textures can cause problems
1824 * with the OOM killer since there is no way to link this memory
1825 * to a process. This was originally mitigated (but not necessarily
1826 * eliminated) by limiting the GTT size. The problem is this limit
1827 * is often too low for many modern games so just make the limit 1/2
1828 * of system memory which aligns with TTM. The OOM accounting needs
1829 * to be addressed, but we shouldn't prevent common 3D applications
1830 * from being usable just to potentially mitigate that corner case.
1832 gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1833 (u64)si.totalram * si.mem_unit / 2);
1835 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
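/*
 * Example (assuming the default AMDGPU_DEFAULT_GTT_SIZE_MB of 3072): on a
 * machine with 16 GiB of RAM this picks max(3 GiB, 8 GiB) = 8 GiB of GTT,
 * while booting with amdgpu.gtt_size=4096 would force 4 GiB instead.
 */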
1838 /* Initialize GTT memory pool */
1839 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1841 DRM_ERROR("Failed initializing GTT heap.\n");
1844 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1845 (unsigned)(gtt_size / (1024 * 1024)));
1847 /* Initialize preemptible memory pool */
1848 r = amdgpu_preempt_mgr_init(adev);
1850 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1854 /* Initialize various on-chip memory pools */
1855 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1857 DRM_ERROR("Failed initializing GDS heap.\n");
1861 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1863 DRM_ERROR("Failed initializing gws heap.\n");
1867 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1869 DRM_ERROR("Failed initializing oa heap.\n");
1873 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1874 AMDGPU_GEM_DOMAIN_GTT,
1875 &adev->mman.sdma_access_bo, NULL,
1876 &adev->mman.sdma_access_ptr))
1877 DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1883 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1885 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1888 if (!adev->mman.initialized)
1891 amdgpu_ttm_training_reserve_vram_fini(adev);
1892 /* return the stolen vga memory back to VRAM */
1893 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1894 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1895 /* return the IP Discovery TMR memory back to VRAM */
1896 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1897 if (adev->mman.stolen_reserved_size)
1898 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1900 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
1901 &adev->mman.sdma_access_ptr);
1902 amdgpu_ttm_fw_reserve_vram_fini(adev);
1903 amdgpu_ttm_drv_reserve_vram_fini(adev);
1905 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1907 if (adev->mman.aper_base_kaddr)
1908 iounmap(adev->mman.aper_base_kaddr);
1909 adev->mman.aper_base_kaddr = NULL;
1914 amdgpu_vram_mgr_fini(adev);
1915 amdgpu_gtt_mgr_fini(adev);
1916 amdgpu_preempt_mgr_fini(adev);
1917 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1918 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1919 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1920 ttm_device_fini(&adev->mman.bdev);
1921 adev->mman.initialized = false;
1922 DRM_INFO("amdgpu: ttm finalized\n");
1926 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1928 * @adev: amdgpu_device pointer
1929 * @enable: true when we can use buffer functions.
1931 * Enable/disable use of buffer functions during suspend/resume. This should
1932 * only be called at bootup or when userspace isn't running.
1934 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1936 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1940 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1941 adev->mman.buffer_funcs_enabled == enable)
1945 struct amdgpu_ring *ring;
1946 struct drm_gpu_scheduler *sched;
1948 ring = adev->mman.buffer_funcs_ring;
1949 sched = &ring->sched;
1950 r = drm_sched_entity_init(&adev->mman.entity,
1951 DRM_SCHED_PRIORITY_KERNEL, &sched,
1954 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1959 drm_sched_entity_destroy(&adev->mman.entity);
1960 dma_fence_put(man->move);
1964 /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1966 size = adev->gmc.real_vram_size;
1968 size = adev->gmc.visible_vram_size;
1970 adev->mman.buffer_funcs_enabled = enable;
1973 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
1975 unsigned int num_dw,
1976 struct dma_resv *resv,
1977 bool vm_needs_flush,
1978 struct amdgpu_job **job)
1980 enum amdgpu_ib_pool_type pool = direct_submit ?
1981 AMDGPU_IB_POOL_DIRECT :
1982 AMDGPU_IB_POOL_DELAYED;
1985 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
1986 AMDGPU_FENCE_OWNER_UNDEFINED,
1987 num_dw * 4, pool, job);
1991 if (vm_needs_flush) {
1992 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1995 (*job)->vm_needs_flush = true;
2000 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2001 DMA_RESV_USAGE_BOOKKEEP);
2004 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2005 uint64_t dst_offset, uint32_t byte_count,
2006 struct dma_resv *resv,
2007 struct dma_fence **fence, bool direct_submit,
2008 bool vm_needs_flush, bool tmz)
2010 struct amdgpu_device *adev = ring->adev;
2011 unsigned num_loops, num_dw;
2012 struct amdgpu_job *job;
2017 if (!direct_submit && !ring->sched.ready) {
2018 DRM_ERROR("Trying to move memory with ring turned off.\n");
2022 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2023 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2024 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
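/*
 * Illustrative sizing (assumed SDMA limits of roughly 4 MiB copy_max_bytes
 * and 7 dwords per copy packet): a 10 MiB copy needs num_loops = 3 and
 * num_dw = ALIGN(3 * 7, 8) = 24 dwords of IB space.
 */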
2025 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2026 resv, vm_needs_flush, &job);
2030 for (i = 0; i < num_loops; i++) {
2031 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2033 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2034 dst_offset, cur_size_in_bytes, tmz);
2036 src_offset += cur_size_in_bytes;
2037 dst_offset += cur_size_in_bytes;
2038 byte_count -= cur_size_in_bytes;
2041 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2042 WARN_ON(job->ibs[0].length_dw > num_dw);
2044 r = amdgpu_job_submit_direct(job, ring, fence);
2046 *fence = amdgpu_job_submit(job);
2053 amdgpu_job_free(job);
2054 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2058 static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2059 uint64_t dst_addr, uint32_t byte_count,
2060 struct dma_resv *resv,
2061 struct dma_fence **fence,
2062 bool vm_needs_flush)
2064 struct amdgpu_device *adev = ring->adev;
2065 unsigned int num_loops, num_dw;
2066 struct amdgpu_job *job;
2071 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2072 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2073 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2074 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2079 for (i = 0; i < num_loops; i++) {
2080 uint32_t cur_size = min(byte_count, max_bytes);
2082 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2085 dst_addr += cur_size;
2086 byte_count -= cur_size;
2089 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2090 WARN_ON(job->ibs[0].length_dw > num_dw);
2091 *fence = amdgpu_job_submit(job);
2095 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2097 struct dma_resv *resv,
2098 struct dma_fence **f)
2100 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2101 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2102 struct dma_fence *fence = NULL;
2103 struct amdgpu_res_cursor dst;
2106 if (!adev->mman.buffer_funcs_enabled) {
2107 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2111 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2113 mutex_lock(&adev->mman.gtt_window_lock);
2114 while (dst.remaining) {
2115 struct dma_fence *next;
2116 uint64_t cur_size, to;
2118 /* Never fill more than 256MiB at once to avoid timeouts */
2119 cur_size = min(dst.size, 256ULL << 20);
2121 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2122 1, ring, false, &cur_size, &to);
2126 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2131 dma_fence_put(fence);
2134 amdgpu_res_next(&dst, cur_size);
2137 mutex_unlock(&adev->mman.gtt_window_lock);
2139 *f = dma_fence_get(fence);
2140 dma_fence_put(fence);
2145 * amdgpu_ttm_evict_resources - evict memory buffers
2146 * @adev: amdgpu device object
2147 * @mem_type: evicted BO's memory type
2149 * Evicts all @mem_type buffers on the lru list of the memory type.
2152 * 0 for success or a negative error code on failure.
2154 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2156 struct ttm_resource_manager *man;
2164 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2167 DRM_ERROR("Trying to evict invalid memory type\n");
2171 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2174 #if defined(CONFIG_DEBUG_FS)
2176 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2178 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2180 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2183 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2186 * amdgpu_ttm_vram_read - Linear read access to VRAM
2188 * Accesses VRAM via MMIO for debugging purposes.
2190 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2191 size_t size, loff_t *pos)
2193 struct amdgpu_device *adev = file_inode(f)->i_private;
2196 if (size & 0x3 || *pos & 0x3)
2199 if (*pos >= adev->gmc.mc_vram_size)
2202 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2204 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2205 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2207 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2208 if (copy_to_user(buf, value, bytes))
2221 * amdgpu_ttm_vram_write - Linear write access to VRAM
2223 * Accesses VRAM via MMIO for debugging purposes.
2225 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2226 size_t size, loff_t *pos)
2228 struct amdgpu_device *adev = file_inode(f)->i_private;
2232 if (size & 0x3 || *pos & 0x3)
2235 if (*pos >= adev->gmc.mc_vram_size)
2241 if (*pos >= adev->gmc.mc_vram_size)
2244 r = get_user(value, (uint32_t *)buf);
2248 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2259 static const struct file_operations amdgpu_ttm_vram_fops = {
2260 .owner = THIS_MODULE,
2261 .read = amdgpu_ttm_vram_read,
2262 .write = amdgpu_ttm_vram_write,
2263 .llseek = default_llseek,
2267 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2269 * This function is used to read memory that has been mapped to the
2270 * GPU and the known addresses are not physical addresses but instead
2271 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2273 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2274 size_t size, loff_t *pos)
2276 struct amdgpu_device *adev = file_inode(f)->i_private;
2277 struct iommu_domain *dom;
2281 /* retrieve the IOMMU domain if any for this device */
2282 dom = iommu_get_domain_for_dev(adev->dev);
2285 phys_addr_t addr = *pos & PAGE_MASK;
2286 loff_t off = *pos & ~PAGE_MASK;
2287 size_t bytes = PAGE_SIZE - off;
2292 bytes = bytes < size ? bytes : size;
2294 /* Translate the bus address to a physical address. If
2295 * the domain is NULL it means there is no IOMMU active
2296 * and the address translation is the identity
2298 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2300 pfn = addr >> PAGE_SHIFT;
2301 if (!pfn_valid(pfn))
2304 p = pfn_to_page(pfn);
2305 if (p->mapping != adev->mman.bdev.dev_mapping)
2308 ptr = kmap_local_page(p);
2309 r = copy_to_user(buf, ptr + off, bytes);
2323 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2325 * This function is used to write memory that has been mapped to the
2326 * GPU and the known addresses are not physical addresses but instead
2327 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2329 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2330 size_t size, loff_t *pos)
2332 struct amdgpu_device *adev = file_inode(f)->i_private;
2333 struct iommu_domain *dom;
2337 dom = iommu_get_domain_for_dev(adev->dev);
2340 phys_addr_t addr = *pos & PAGE_MASK;
2341 loff_t off = *pos & ~PAGE_MASK;
2342 size_t bytes = PAGE_SIZE - off;
2347 bytes = bytes < size ? bytes : size;
2349 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2351 pfn = addr >> PAGE_SHIFT;
2352 if (!pfn_valid(pfn))
2355 p = pfn_to_page(pfn);
2356 if (p->mapping != adev->mman.bdev.dev_mapping)
2359 ptr = kmap_local_page(p);
2360 r = copy_from_user(ptr + off, buf, bytes);
2373 static const struct file_operations amdgpu_ttm_iomem_fops = {
2374 .owner = THIS_MODULE,
2375 .read = amdgpu_iomem_read,
2376 .write = amdgpu_iomem_write,
2377 .llseek = default_llseek
2382 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2384 #if defined(CONFIG_DEBUG_FS)
2385 struct drm_minor *minor = adev_to_drm(adev)->primary;
2386 struct dentry *root = minor->debugfs_root;
2388 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2389 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2390 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2391 &amdgpu_ttm_iomem_fops);
2392 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2393 &amdgpu_ttm_page_pool_fops);
2394 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2396 root, "amdgpu_vram_mm");
2397 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2399 root, "amdgpu_gtt_mm");
2400 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2402 root, "amdgpu_gds_mm");
2403 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2405 root, "amdgpu_gws_mm");
2406 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2408 root, "amdgpu_oa_mm");