1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics; however,
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix of
45  * vram pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When executing a command buffer,
50  * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63                      START, LAST, static, amdgpu_vm_it)
64
65 #undef START
66 #undef LAST
67
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76
77         /**
78          * @adev: amdgpu device we do this update for
79          */
80         struct amdgpu_device *adev;
81
82         /**
83          * @vm: optional amdgpu_vm we do this update for
84          */
85         struct amdgpu_vm *vm;
86
87         /**
88          * @src: address where to copy page table entries from
89          */
90         uint64_t src;
91
92         /**
93          * @ib: indirect buffer to fill with commands
94          */
95         struct amdgpu_ib *ib;
96
97         /**
98          * @func: Function which actually does the update
99          */
100         void (*func)(struct amdgpu_pte_update_params *params,
101                      struct amdgpu_bo *bo, uint64_t pe,
102                      uint64_t addr, unsigned count, uint32_t incr,
103                      uint64_t flags);
104         /**
105          * @pages_addr:
106          *
107          * DMA addresses to use for mapping, used during VM update by CPU
108          */
109         dma_addr_t *pages_addr;
110 };
111
112 /**
113  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
114  */
115 struct amdgpu_prt_cb {
116
117         /**
118          * @adev: amdgpu device
119          */
120         struct amdgpu_device *adev;
121
122         /**
123          * @cb: callback
124          */
125         struct dma_fence_cb cb;
126 };
127
128 /**
129  * amdgpu_vm_level_shift - return the addr shift for each level
130  *
131  * @adev: amdgpu_device pointer
132  * @level: VMPT level
133  *
134  * Returns:
135  * The number of bits the pfn needs to be right shifted for a level.
136  */
137 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
138                                       unsigned level)
139 {
140         unsigned shift = 0xff;
141
142         switch (level) {
143         case AMDGPU_VM_PDB2:
144         case AMDGPU_VM_PDB1:
145         case AMDGPU_VM_PDB0:
146                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
147                         adev->vm_manager.block_size;
148                 break;
149         case AMDGPU_VM_PTB:
150                 shift = 0;
151                 break;
152         default:
153                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
154         }
155
156         return shift;
157 }
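
/*
 * Illustrative example (assumed configuration, not read from the hardware):
 * with a 9 bit block_size and AMDGPU_VM_PDB2 as root_level the shifts above
 * come out as PTB = 0, PDB0 = 9, PDB1 = 18 and PDB2 = 27, so walking down
 * the tree consumes successive 9 bit slices of the pfn, mirroring
 * amdgpu_vm_pt_descendant():
 *
 *	pfn = addr >> AMDGPU_GPU_PAGE_SHIFT;
 *	idx = (pfn >> amdgpu_vm_level_shift(adev, level)) &
 *	      amdgpu_vm_entries_mask(adev, level);
 *
 * The real values depend on adev->vm_manager.block_size and root_level.
 */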
158
159 /**
160  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
161  *
162  * @adev: amdgpu_device pointer
163  * @level: VMPT level
164  *
165  * Returns:
166  * The number of entries in a page directory or page table.
167  */
168 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
169                                       unsigned level)
170 {
171         unsigned shift = amdgpu_vm_level_shift(adev,
172                                                adev->vm_manager.root_level);
173
174         if (level == adev->vm_manager.root_level)
175                 /* For the root directory */
176                 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
177         else if (level != AMDGPU_VM_PTB)
178                 /* Everything in between */
179                 return 512;
180         else
181                 /* For the page tables on the leaves */
182                 return AMDGPU_VM_PTE_COUNT(adev);
183 }
184
185 /**
186  * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
187  *
188  * @adev: amdgpu_device pointer
189  * @level: VMPT level
190  *
191  * Returns:
192  * The mask to extract the entry number of a PD/PT from an address.
193  */
194 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
195                                        unsigned int level)
196 {
197         if (level <= adev->vm_manager.root_level)
198                 return 0xffffffff;
199         else if (level != AMDGPU_VM_PTB)
200                 return 0x1ff;
201         else
202                 return AMDGPU_VM_PTE_COUNT(adev) - 1;
203 }
204
205 /**
206  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
207  *
208  * @adev: amdgpu_device pointer
209  * @level: VMPT level
210  *
211  * Returns:
212  * The size of the BO for a page directory or page table in bytes.
213  */
214 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
215 {
216         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
217 }
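
/*
 * For illustration: an interior PD has 512 entries (see
 * amdgpu_vm_num_entries() above), so its backing BO is 512 * 8 = 4096 bytes,
 * i.e. exactly one GPU page.  The root PD size follows from max_pfn and a
 * PTB from AMDGPU_VM_PTE_COUNT(), so either one may differ from that figure.
 */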
218
219 /**
220  * amdgpu_vm_bo_evicted - vm_bo is evicted
221  *
222  * @vm_bo: vm_bo which is evicted
223  *
224  * State for PDs/PTs and per VM BOs which are not at the location they should
225  * be.
226  */
227 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
228 {
229         struct amdgpu_vm *vm = vm_bo->vm;
230         struct amdgpu_bo *bo = vm_bo->bo;
231
232         vm_bo->moved = true;
233         if (bo->tbo.type == ttm_bo_type_kernel)
234                 list_move(&vm_bo->vm_status, &vm->evicted);
235         else
236                 list_move_tail(&vm_bo->vm_status, &vm->evicted);
237 }
238
239 /**
240  * amdgpu_vm_bo_relocated - vm_bo is relocated
241  *
242  * @vm_bo: vm_bo which is relocated
243  *
244  * State for PDs/PTs which need to update their parent PD.
245  */
246 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
247 {
248         list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
249 }
250
251 /**
252  * amdgpu_vm_bo_moved - vm_bo is moved
253  *
254  * @vm_bo: vm_bo which is moved
255  *
256  * State for per VM BOs which are moved, but that change is not yet reflected
257  * in the page tables.
258  */
259 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
260 {
261         list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
262 }
263
264 /**
265  * amdgpu_vm_bo_idle - vm_bo is idle
266  *
267  * @vm_bo: vm_bo which is now idle
268  *
269  * State for PDs/PTs and per VM BOs which have gone through the state machine
270  * and are now idle.
271  */
272 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
273 {
274         list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
275         vm_bo->moved = false;
276 }
277
278 /**
279  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
280  *
281  * @vm_bo: vm_bo which is now invalidated
282  *
283  * State for normal BOs which are invalidated and that change is not yet reflected
284  * in the PTs.
285  */
286 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
287 {
288         spin_lock(&vm_bo->vm->invalidated_lock);
289         list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
290         spin_unlock(&vm_bo->vm->invalidated_lock);
291 }
292
293 /**
294  * amdgpu_vm_bo_done - vm_bo is done
295  *
296  * @vm_bo: vm_bo which is now done
297  *
298  * State for normal BOs which are invalidated and that change has been updated
299  * in the PTs.
300  */
301 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
302 {
303         spin_lock(&vm_bo->vm->invalidated_lock);
304         list_del_init(&vm_bo->vm_status);
305         spin_unlock(&vm_bo->vm->invalidated_lock);
306 }
307
308 /**
309  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
310  *
311  * @base: base structure for tracking BO usage in a VM
312  * @vm: vm to which bo is to be added
313  * @bo: amdgpu buffer object
314  *
315  * Initialize a bo_va_base structure and add it to the appropriate lists
316  *
317  */
318 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
319                                    struct amdgpu_vm *vm,
320                                    struct amdgpu_bo *bo)
321 {
322         base->vm = vm;
323         base->bo = bo;
324         base->next = NULL;
325         INIT_LIST_HEAD(&base->vm_status);
326
327         if (!bo)
328                 return;
329         base->next = bo->vm_bo;
330         bo->vm_bo = base;
331
332         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
333                 return;
334
335         vm->bulk_moveable = false;
336         if (bo->tbo.type == ttm_bo_type_kernel)
337                 amdgpu_vm_bo_relocated(base);
338         else
339                 amdgpu_vm_bo_idle(base);
340
341         if (bo->preferred_domains &
342             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
343                 return;
344
345         /*
346          * We checked all the prerequisites, but it looks like this per VM BO
347          * is currently evicted. Add the BO to the evicted list to make sure it
348          * is validated on next VM use to avoid faults.
349          */
350         amdgpu_vm_bo_evicted(base);
351 }
352
353 /**
354  * amdgpu_vm_pt_parent - get the parent page directory
355  *
356  * @pt: child page table
357  *
358  * Helper to get the parent entry for the child page table. NULL if we are at
359  * the root page directory.
360  */
361 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
362 {
363         struct amdgpu_bo *parent = pt->base.bo->parent;
364
365         if (!parent)
366                 return NULL;
367
368         return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
369 }
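
/*
 * Note: the container_of() above works because a PD/PT BO is only ever
 * tracked by a single VM, so parent->vm_bo points at the one
 * amdgpu_vm_bo_base embedded in the parent's struct amdgpu_vm_pt.
 */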
370
371 /**
372  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
373  */
374 struct amdgpu_vm_pt_cursor {
375         uint64_t pfn;
376         struct amdgpu_vm_pt *parent;
377         struct amdgpu_vm_pt *entry;
378         unsigned level;
379 };
380
381 /**
382  * amdgpu_vm_pt_start - start PD/PT walk
383  *
384  * @adev: amdgpu_device pointer
385  * @vm: amdgpu_vm structure
386  * @start: start address of the walk
387  * @cursor: state to initialize
388  *
389  * Initialize an amdgpu_vm_pt_cursor to start a walk.
390  */
391 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
392                                struct amdgpu_vm *vm, uint64_t start,
393                                struct amdgpu_vm_pt_cursor *cursor)
394 {
395         cursor->pfn = start;
396         cursor->parent = NULL;
397         cursor->entry = &vm->root;
398         cursor->level = adev->vm_manager.root_level;
399 }
400
401 /**
402  * amdgpu_vm_pt_descendant - go to child node
403  *
404  * @adev: amdgpu_device pointer
405  * @cursor: current state
406  *
407  * Walk to the child node of the current node.
408  * Returns:
409  * True if the walk was possible, false otherwise.
410  */
411 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
412                                     struct amdgpu_vm_pt_cursor *cursor)
413 {
414         unsigned mask, shift, idx;
415
416         if (!cursor->entry->entries)
417                 return false;
418
419         BUG_ON(!cursor->entry->base.bo);
420         mask = amdgpu_vm_entries_mask(adev, cursor->level);
421         shift = amdgpu_vm_level_shift(adev, cursor->level);
422
423         ++cursor->level;
424         idx = (cursor->pfn >> shift) & mask;
425         cursor->parent = cursor->entry;
426         cursor->entry = &cursor->entry->entries[idx];
427         return true;
428 }
429
430 /**
431  * amdgpu_vm_pt_sibling - go to sibling node
432  *
433  * @adev: amdgpu_device pointer
434  * @cursor: current state
435  *
436  * Walk to the sibling node of the current node.
437  * Returns:
438  * True if the walk was possible, false otherwise.
439  */
440 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
441                                  struct amdgpu_vm_pt_cursor *cursor)
442 {
443         unsigned shift, num_entries;
444
445         /* Root doesn't have a sibling */
446         if (!cursor->parent)
447                 return false;
448
449         /* Go to our parent and see if we have a sibling */
450         shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
451         num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
452
453         if (cursor->entry == &cursor->parent->entries[num_entries - 1])
454                 return false;
455
456         cursor->pfn += 1ULL << shift;
457         cursor->pfn &= ~((1ULL << shift) - 1);
458         ++cursor->entry;
459         return true;
460 }
461
462 /**
463  * amdgpu_vm_pt_ancestor - go to parent node
464  *
465  * @cursor: current state
466  *
467  * Walk to the parent node of the current node.
468  * Returns:
469  * True if the walk was possible, false otherwise.
470  */
471 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
472 {
473         if (!cursor->parent)
474                 return false;
475
476         --cursor->level;
477         cursor->entry = cursor->parent;
478         cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
479         return true;
480 }
481
482 /**
483  * amdgpu_vm_pt_next - get next PD/PT in hierarchy
484  *
485  * @adev: amdgpu_device pointer
486  * @cursor: current state
487  *
488  * Walk the PD/PT tree to the next node.
489  */
490 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
491                               struct amdgpu_vm_pt_cursor *cursor)
492 {
493         /* First try a newborn child */
494         if (amdgpu_vm_pt_descendant(adev, cursor))
495                 return;
496
497         /* If that didn't work, try to find a sibling */
498         while (!amdgpu_vm_pt_sibling(adev, cursor)) {
499                 /* No sibling, go to our parents and grandparents */
500                 if (!amdgpu_vm_pt_ancestor(cursor)) {
501                         cursor->pfn = ~0ll;
502                         return;
503                 }
504         }
505 }
506
507 /**
508  * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
509  *
510  * @adev: amdgpu_device pointer
511  * @vm: amdgpu_vm structure
512  * @start: start addr of the walk
513  * @cursor: state to initialize
514  *
515  * Start a walk and go directly to the leaf node.
516  */
517 static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
518                                     struct amdgpu_vm *vm, uint64_t start,
519                                     struct amdgpu_vm_pt_cursor *cursor)
520 {
521         amdgpu_vm_pt_start(adev, vm, start, cursor);
522         while (amdgpu_vm_pt_descendant(adev, cursor));
523 }
524
525 /**
526  * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
527  *
528  * @adev: amdgpu_device pointer
529  * @cursor: current state
530  *
531  * Walk the PD/PT tree to the next leaf node.
532  */
533 static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
534                                    struct amdgpu_vm_pt_cursor *cursor)
535 {
536         amdgpu_vm_pt_next(adev, cursor);
537         if (cursor->pfn != ~0ll)
538                 while (amdgpu_vm_pt_descendant(adev, cursor));
539 }
540
541 /**
542  * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
543  */
544 #define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor)                \
545         for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor));         \
546              (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
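
/*
 * Minimal usage sketch for the iterator above (amdgpu_vm_alloc_pts() below
 * is the real in-tree user); start and end are pfns, not byte addresses:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *
 *	for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) {
 *		struct amdgpu_vm_pt *entry = cursor.entry;
 *
 *		... operate on the leaf covering cursor.pfn ...
 *	}
 */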
547
548 /**
549  * amdgpu_vm_pt_first_dfs - start a depth-first search
550  *
551  * @adev: amdgpu_device structure
552  * @vm: amdgpu_vm structure
553  * @cursor: state to initialize
554  *
555  * Starts a depth-first traversal of the PD/PT tree.
556  */
557 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
558                                    struct amdgpu_vm *vm,
559                                    struct amdgpu_vm_pt_cursor *cursor)
560 {
561         amdgpu_vm_pt_start(adev, vm, 0, cursor);
562         while (amdgpu_vm_pt_descendant(adev, cursor));
563 }
564
565 /**
566  * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
567  *
568  * @adev: amdgpu_device structure
569  * @cursor: current state
570  *
571  * Move the cursor to the next node in a depth-first search.
572  */
573 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
574                                   struct amdgpu_vm_pt_cursor *cursor)
575 {
576         if (!cursor->entry)
577                 return;
578
579         if (!cursor->parent)
580                 cursor->entry = NULL;
581         else if (amdgpu_vm_pt_sibling(adev, cursor))
582                 while (amdgpu_vm_pt_descendant(adev, cursor));
583         else
584                 amdgpu_vm_pt_ancestor(cursor);
585 }
586
587 /**
588  * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
589  */
590 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)                 \
591         for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)),                   \
592              (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
593              (entry); (entry) = (cursor).entry,                                 \
594              amdgpu_vm_pt_next_dfs((adev), &(cursor)))
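
/*
 * Note: the cursor is advanced past @entry before the loop body runs, which
 * is what makes this variant safe to use while freeing the entries, as
 * amdgpu_vm_free_pts() below does.
 */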
595
596 /**
597  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
598  *
599  * @vm: vm providing the BOs
600  * @validated: head of validation list
601  * @entry: entry to add
602  *
603  * Add the page directory to the list of BOs to
604  * validate for command submission.
605  */
606 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
607                          struct list_head *validated,
608                          struct amdgpu_bo_list_entry *entry)
609 {
610         entry->priority = 0;
611         entry->tv.bo = &vm->root.base.bo->tbo;
612         /* One for the VM updates, one for TTM and one for the CS job */
613         entry->tv.num_shared = 3;
614         entry->user_pages = NULL;
615         list_add(&entry->tv.head, validated);
616 }
617
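/**
 * amdgpu_vm_del_from_lru_notify - BO is being removed from the LRU
 *
 * @bo: TTM buffer object which leaves the LRU
 *
 * Invalidate the cached bulk move of every VM sharing a reservation object
 * with the BO, so that the next amdgpu_vm_move_to_lru_tail() rebuilds the
 * bulk move instead of reusing stale LRU positions.
 */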
618 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
619 {
620         struct amdgpu_bo *abo;
621         struct amdgpu_vm_bo_base *bo_base;
622
623         if (!amdgpu_bo_is_amdgpu_bo(bo))
624                 return;
625
626         if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
627                 return;
628
629         abo = ttm_to_amdgpu_bo(bo);
630         if (!abo->parent)
631                 return;
632         for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
633                 struct amdgpu_vm *vm = bo_base->vm;
634
635                 if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
636                         vm->bulk_moveable = false;
637         }
638
639 }
640 /**
641  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
642  *
643  * @adev: amdgpu device pointer
644  * @vm: vm providing the BOs
645  *
646  * Move all BOs to the end of LRU and remember their positions to put them
647  * together.
648  */
649 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
650                                 struct amdgpu_vm *vm)
651 {
652         struct ttm_bo_global *glob = adev->mman.bdev.glob;
653         struct amdgpu_vm_bo_base *bo_base;
654
655 #if 0
656         if (vm->bulk_moveable) {
657                 spin_lock(&glob->lru_lock);
658                 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
659                 spin_unlock(&glob->lru_lock);
660                 return;
661         }
662 #endif
663
664         memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
665
666         spin_lock(&glob->lru_lock);
667         list_for_each_entry(bo_base, &vm->idle, vm_status) {
668                 struct amdgpu_bo *bo = bo_base->bo;
669
670                 if (!bo->parent)
671                         continue;
672
673                 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
674                 if (bo->shadow)
675                         ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
676                                                 &vm->lru_bulk_move);
677         }
678         spin_unlock(&glob->lru_lock);
679
680         vm->bulk_moveable = true;
681 }
682
683 /**
684  * amdgpu_vm_validate_pt_bos - validate the page table BOs
685  *
686  * @adev: amdgpu device pointer
687  * @vm: vm providing the BOs
688  * @validate: callback to do the validation
689  * @param: parameter for the validation callback
690  *
691  * Validate the page table BOs on command submission if necessary.
692  *
693  * Returns:
694  * Validation result.
695  */
696 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
697                               int (*validate)(void *p, struct amdgpu_bo *bo),
698                               void *param)
699 {
700         struct amdgpu_vm_bo_base *bo_base, *tmp;
701         int r = 0;
702
703         list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
704                 struct amdgpu_bo *bo = bo_base->bo;
705
706                 r = validate(param, bo);
707                 if (r)
708                         break;
709
710                 if (bo->tbo.type != ttm_bo_type_kernel) {
711                         amdgpu_vm_bo_moved(bo_base);
712                 } else {
713                         if (vm->use_cpu_for_update)
714                                 r = amdgpu_bo_kmap(bo, NULL);
715                         else
716                                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
717                         if (r)
718                                 break;
719                         if (bo->shadow) {
720                                 r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
721                                 if (r)
722                                         break;
723                         }
724                         amdgpu_vm_bo_relocated(bo_base);
725                 }
726         }
727
728         return r;
729 }
730
731 /**
732  * amdgpu_vm_ready - check VM is ready for updates
733  *
734  * @vm: VM to check
735  *
736  * Check if all VM PDs/PTs are ready for updates
737  *
738  * Returns:
739  * True if eviction list is empty.
740  */
741 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
742 {
743         return list_empty(&vm->evicted);
744 }
745
746 /**
747  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
748  *
749  * @adev: amdgpu_device pointer
750  * @vm: VM to clear BO from
751  * @bo: BO to clear
752  * @level: level this BO is at
753  * @pte_support_ats: indicate ATS support from PTE
754  *
755  * Root PD needs to be reserved when calling this.
756  *
757  * Returns:
758  * 0 on success, errno otherwise.
759  */
760 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
761                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
762                               unsigned level, bool pte_support_ats)
763 {
764         struct ttm_operation_ctx ctx = { true, false };
765         struct dma_fence *fence = NULL;
766         unsigned entries, ats_entries;
767         struct amdgpu_ring *ring;
768         struct amdgpu_job *job;
769         uint64_t addr;
770         int r;
771
772         entries = amdgpu_bo_size(bo) / 8;
773
774         if (pte_support_ats) {
775                 if (level == adev->vm_manager.root_level) {
776                         ats_entries = amdgpu_vm_level_shift(adev, level);
777                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
778                         ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
779                         ats_entries = min(ats_entries, entries);
780                         entries -= ats_entries;
781                 } else {
782                         ats_entries = entries;
783                         entries = 0;
784                 }
785         } else {
786                 ats_entries = 0;
787         }
788
789         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
790
791         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
792         if (r)
793                 goto error;
794
795         r = amdgpu_ttm_alloc_gart(&bo->tbo);
796         if (r)
797                 return r;
798
799         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
800         if (r)
801                 goto error;
802
803         addr = amdgpu_bo_gpu_offset(bo);
804         if (ats_entries) {
805                 uint64_t ats_value;
806
807                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
808                 if (level != AMDGPU_VM_PTB)
809                         ats_value |= AMDGPU_PDE_PTE;
810
811                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
812                                       ats_entries, 0, ats_value);
813                 addr += ats_entries * 8;
814         }
815
816         if (entries) {
817                 uint64_t value = 0;
818
819                 /* Workaround for fault priority problem on GMC9 */
820                 if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
821                         value = AMDGPU_PTE_EXECUTABLE;
822
823                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
824                                       entries, 0, value);
825         }
826
827         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
828
829         WARN_ON(job->ibs[0].length_dw > 64);
830         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
831                              AMDGPU_FENCE_OWNER_KFD, false);
832         if (r)
833                 goto error_free;
834
835         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
836                               &fence);
837         if (r)
838                 goto error_free;
839
840         amdgpu_bo_fence(bo, fence, true);
841         dma_fence_put(fence);
842
843         if (bo->shadow)
844                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
845                                           level, pte_support_ats);
846
847         return 0;
848
849 error_free:
850         amdgpu_job_free(job);
851
852 error:
853         return r;
854 }
855
856 /**
857  * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
858  *
859  * @adev: amdgpu_device pointer
860  * @vm: requesting vm
 * @level: VMPT level for which the BO is allocated
861  * @bp: resulting BO allocation parameters
862  */
863 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
864                                int level, struct amdgpu_bo_param *bp)
865 {
866         memset(bp, 0, sizeof(*bp));
867
868         bp->size = amdgpu_vm_bo_size(adev, level);
869         bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
870         bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
871         bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
872         bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
873                 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
874         if (vm->use_cpu_for_update)
875                 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
876         else if (!vm->root.base.bo || vm->root.base.bo->shadow)
877                 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
878         bp->type = ttm_bo_type_kernel;
879         if (vm->root.base.bo)
880                 bp->resv = vm->root.base.bo->tbo.resv;
881 }
882
883 /**
884  * amdgpu_vm_alloc_pts - Allocate page tables.
885  *
886  * @adev: amdgpu_device pointer
887  * @vm: VM to allocate page tables for
888  * @saddr: Start address which needs to be allocated
889  * @size: Size from start address we need.
890  *
891  * Make sure the page directories and page tables are allocated
892  *
893  * Returns:
894  * 0 on success, errno otherwise.
895  */
896 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
897                         struct amdgpu_vm *vm,
898                         uint64_t saddr, uint64_t size)
899 {
900         struct amdgpu_vm_pt_cursor cursor;
901         struct amdgpu_bo *pt;
902         bool ats = false;
903         uint64_t eaddr;
904         int r;
905
906         /* validate the parameters */
907         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
908                 return -EINVAL;
909
910         eaddr = saddr + size - 1;
911
912         if (vm->pte_support_ats)
913                 ats = saddr < AMDGPU_GMC_HOLE_START;
914
915         saddr /= AMDGPU_GPU_PAGE_SIZE;
916         eaddr /= AMDGPU_GPU_PAGE_SIZE;
917
918         if (eaddr >= adev->vm_manager.max_pfn) {
919                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
920                         eaddr, adev->vm_manager.max_pfn);
921                 return -EINVAL;
922         }
923
924         for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
925                 struct amdgpu_vm_pt *entry = cursor.entry;
926                 struct amdgpu_bo_param bp;
927
928                 if (cursor.level < AMDGPU_VM_PTB) {
929                         unsigned num_entries;
930
931                         num_entries = amdgpu_vm_num_entries(adev, cursor.level);
932                         entry->entries = kvmalloc_array(num_entries,
933                                                         sizeof(*entry->entries),
934                                                         GFP_KERNEL |
935                                                         __GFP_ZERO);
936                         if (!entry->entries)
937                                 return -ENOMEM;
938                 }
939
940
941                 if (entry->base.bo)
942                         continue;
943
944                 amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
945
946                 r = amdgpu_bo_create(adev, &bp, &pt);
947                 if (r)
948                         return r;
949
950                 if (vm->use_cpu_for_update) {
951                         r = amdgpu_bo_kmap(pt, NULL);
952                         if (r)
953                                 goto error_free_pt;
954                 }
955
956                 /* Keep a reference to the root directory to avoid
957                  * freeing them up in the wrong order.
958                  */
959                 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
960
961                 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
962
963                 r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
964                 if (r)
965                         goto error_free_pt;
966         }
967
968         return 0;
969
970 error_free_pt:
971         amdgpu_bo_unref(&pt->shadow);
972         amdgpu_bo_unref(&pt);
973         return r;
974 }
975
976 /**
977  * amdgpu_vm_free_pts - free PD/PT levels
978  *
979  * @adev: amdgpu device structure
980  * @vm: amdgpu vm structure
981  *
982  * Free the page directory or page table level and all sub levels.
983  */
984 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
985                                struct amdgpu_vm *vm)
986 {
987         struct amdgpu_vm_pt_cursor cursor;
988         struct amdgpu_vm_pt *entry;
989
990         for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
991
992                 if (entry->base.bo) {
993                         entry->base.bo->vm_bo = NULL;
994                         list_del(&entry->base.vm_status);
995                         amdgpu_bo_unref(&entry->base.bo->shadow);
996                         amdgpu_bo_unref(&entry->base.bo);
997                 }
998                 kvfree(entry->entries);
999         }
1000
1001         BUG_ON(vm->root.base.bo);
1002 }
1003
1004 /**
1005  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
1006  *
1007  * @adev: amdgpu_device pointer
1008  */
1009 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
1010 {
1011         const struct amdgpu_ip_block *ip_block;
1012         bool has_compute_vm_bug;
1013         struct amdgpu_ring *ring;
1014         int i;
1015
1016         has_compute_vm_bug = false;
1017
1018         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
1019         if (ip_block) {
1020                 /* Compute has a VM bug for GFX version < 7.
1021                  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
1022                 if (ip_block->version->major <= 7)
1023                         has_compute_vm_bug = true;
1024                 else if (ip_block->version->major == 8)
1025                         if (adev->gfx.mec_fw_version < 673)
1026                                 has_compute_vm_bug = true;
1027         }
1028
1029         for (i = 0; i < adev->num_rings; i++) {
1030                 ring = adev->rings[i];
1031                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1032                         /* only compute rings */
1033                         ring->has_compute_vm_bug = has_compute_vm_bug;
1034                 else
1035                         ring->has_compute_vm_bug = false;
1036         }
1037 }
1038
1039 /**
1040  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1041  *
1042  * @ring: ring on which the job will be submitted
1043  * @job: job to submit
1044  *
1045  * Returns:
1046  * True if sync is needed.
1047  */
1048 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1049                                   struct amdgpu_job *job)
1050 {
1051         struct amdgpu_device *adev = ring->adev;
1052         unsigned vmhub = ring->funcs->vmhub;
1053         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1054         struct amdgpu_vmid *id;
1055         bool gds_switch_needed;
1056         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1057
1058         if (job->vmid == 0)
1059                 return false;
1060         id = &id_mgr->ids[job->vmid];
1061         gds_switch_needed = ring->funcs->emit_gds_switch && (
1062                 id->gds_base != job->gds_base ||
1063                 id->gds_size != job->gds_size ||
1064                 id->gws_base != job->gws_base ||
1065                 id->gws_size != job->gws_size ||
1066                 id->oa_base != job->oa_base ||
1067                 id->oa_size != job->oa_size);
1068
1069         if (amdgpu_vmid_had_gpu_reset(adev, id))
1070                 return true;
1071
1072         return vm_flush_needed || gds_switch_needed;
1073 }
1074
1075 /**
1076  * amdgpu_vm_flush - hardware flush the vm
1077  *
1078  * @ring: ring to use for flush
1079  * @job:  related job
1080  * @need_pipe_sync: is pipe sync needed
1081  *
1082  * Emit a VM flush when it is necessary.
1083  *
1084  * Returns:
1085  * 0 on success, errno otherwise.
1086  */
1087 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
1088 {
1089         struct amdgpu_device *adev = ring->adev;
1090         unsigned vmhub = ring->funcs->vmhub;
1091         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1092         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1093         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1094                 id->gds_base != job->gds_base ||
1095                 id->gds_size != job->gds_size ||
1096                 id->gws_base != job->gws_base ||
1097                 id->gws_size != job->gws_size ||
1098                 id->oa_base != job->oa_base ||
1099                 id->oa_size != job->oa_size);
1100         bool vm_flush_needed = job->vm_needs_flush;
1101         bool pasid_mapping_needed = id->pasid != job->pasid ||
1102                 !id->pasid_mapping ||
1103                 !dma_fence_is_signaled(id->pasid_mapping);
1104         struct dma_fence *fence = NULL;
1105         unsigned patch_offset = 0;
1106         int r;
1107
1108         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1109                 gds_switch_needed = true;
1110                 vm_flush_needed = true;
1111                 pasid_mapping_needed = true;
1112         }
1113
1114         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1115         vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
1116                         job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1117         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1118                 ring->funcs->emit_wreg;
1119
1120         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1121                 return 0;
1122
1123         if (ring->funcs->init_cond_exec)
1124                 patch_offset = amdgpu_ring_init_cond_exec(ring);
1125
1126         if (need_pipe_sync)
1127                 amdgpu_ring_emit_pipeline_sync(ring);
1128
1129         if (vm_flush_needed) {
1130                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1131                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1132         }
1133
1134         if (pasid_mapping_needed)
1135                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1136
1137         if (vm_flush_needed || pasid_mapping_needed) {
1138                 r = amdgpu_fence_emit(ring, &fence, 0);
1139                 if (r)
1140                         return r;
1141         }
1142
1143         if (vm_flush_needed) {
1144                 mutex_lock(&id_mgr->lock);
1145                 dma_fence_put(id->last_flush);
1146                 id->last_flush = dma_fence_get(fence);
1147                 id->current_gpu_reset_count =
1148                         atomic_read(&adev->gpu_reset_counter);
1149                 mutex_unlock(&id_mgr->lock);
1150         }
1151
1152         if (pasid_mapping_needed) {
1153                 id->pasid = job->pasid;
1154                 dma_fence_put(id->pasid_mapping);
1155                 id->pasid_mapping = dma_fence_get(fence);
1156         }
1157         dma_fence_put(fence);
1158
1159         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1160                 id->gds_base = job->gds_base;
1161                 id->gds_size = job->gds_size;
1162                 id->gws_base = job->gws_base;
1163                 id->gws_size = job->gws_size;
1164                 id->oa_base = job->oa_base;
1165                 id->oa_size = job->oa_size;
1166                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1167                                             job->gds_size, job->gws_base,
1168                                             job->gws_size, job->oa_base,
1169                                             job->oa_size);
1170         }
1171
1172         if (ring->funcs->patch_cond_exec)
1173                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
1174
1175         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1176         if (ring->funcs->emit_switch_buffer) {
1177                 amdgpu_ring_emit_switch_buffer(ring);
1178                 amdgpu_ring_emit_switch_buffer(ring);
1179         }
1180         return 0;
1181 }
1182
1183 /**
1184  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1185  *
1186  * @vm: requested vm
1187  * @bo: requested buffer object
1188  *
1189  * Find @bo inside the requested vm.
1190  * Search inside the @bo's vm list for the requested vm.
1191  * Returns the found bo_va or NULL if none is found.
1192  *
1193  * Object has to be reserved!
1194  *
1195  * Returns:
1196  * Found bo_va or NULL.
1197  */
1198 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1199                                        struct amdgpu_bo *bo)
1200 {
1201         struct amdgpu_vm_bo_base *base;
1202
1203         for (base = bo->vm_bo; base; base = base->next) {
1204                 if (base->vm != vm)
1205                         continue;
1206
1207                 return container_of(base, struct amdgpu_bo_va, base);
1208         }
1209         return NULL;
1210 }
1211
1212 /**
1213  * amdgpu_vm_do_set_ptes - helper to call the right asic function
1214  *
1215  * @params: see amdgpu_pte_update_params definition
1216  * @bo: PD/PT to update
1217  * @pe: addr of the page entry
1218  * @addr: dst addr to write into pe
1219  * @count: number of page entries to update
1220  * @incr: increase next addr by incr bytes
1221  * @flags: hw access flags
1222  *
1223  * Traces the parameters and calls the right asic functions
1224  * to setup the page table using the DMA.
1225  */
1226 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
1227                                   struct amdgpu_bo *bo,
1228                                   uint64_t pe, uint64_t addr,
1229                                   unsigned count, uint32_t incr,
1230                                   uint64_t flags)
1231 {
1232         pe += amdgpu_bo_gpu_offset(bo);
1233         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1234
1235         if (count < 3) {
1236                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
1237                                     addr | flags, count, incr);
1238
1239         } else {
1240                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
1241                                       count, incr, flags);
1242         }
1243 }
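
/*
 * The count < 3 threshold above is a heuristic: for one or two entries it is
 * presumably cheaper to write the PTE values straight into the IB with
 * amdgpu_vm_write_pte() than to pay the fixed overhead of a generate
 * PTE/PDE packet, which only pays off for longer runs.
 */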
1244
1245 /**
1246  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
1247  *
1248  * @params: see amdgpu_pte_update_params definition
1249  * @bo: PD/PT to update
1250  * @pe: addr of the page entry
1251  * @addr: dst addr to write into pe
1252  * @count: number of page entries to update
1253  * @incr: increase next addr by incr bytes
1254  * @flags: hw access flags
1255  *
1256  * Traces the parameters and calls the DMA function to copy the PTEs.
1257  */
1258 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
1259                                    struct amdgpu_bo *bo,
1260                                    uint64_t pe, uint64_t addr,
1261                                    unsigned count, uint32_t incr,
1262                                    uint64_t flags)
1263 {
1264         uint64_t src = (params->src + (addr >> 12) * 8);
1265
1266         pe += amdgpu_bo_gpu_offset(bo);
1267         trace_amdgpu_vm_copy_ptes(pe, src, count);
1268
1269         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
1270 }
1271
1272 /**
1273  * amdgpu_vm_map_gart - Resolve gart mapping of addr
1274  *
1275  * @pages_addr: optional DMA address to use for lookup
1276  * @addr: the unmapped addr
1277  *
1278  * Look up the physical address of the page that the pte resolves
1279  * to.
1280  *
1281  * Returns:
1282  * The pointer for the page table entry.
1283  */
1284 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1285 {
1286         uint64_t result;
1287
1288         /* page table offset */
1289         result = pages_addr[addr >> PAGE_SHIFT];
1290
1291         /* in case cpu page size != gpu page size*/
1292         result |= addr & (~PAGE_MASK);
1293
1294         result &= 0xFFFFFFFFFFFFF000ULL;
1295
1296         return result;
1297 }
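
/*
 * Worked example with assumed numbers: on a kernel with 64KB pages
 * (PAGE_SHIFT == 16) and pages_addr[1] == 0xabcd0000, an addr of 0x13000
 * resolves as
 *
 *	result  = pages_addr[0x13000 >> 16];	(result == 0xabcd0000)
 *	result |= 0x13000 & ~PAGE_MASK;		(result == 0xabcd3000)
 *	result &= 0xFFFFFFFFFFFFF000ULL;	(unchanged, low bits already 0)
 *
 * i.e. the DMA address of the CPU page plus the offset of the 4KB GPU page
 * inside it.  With 4KB CPU pages the OR and the final mask cancel out and
 * the raw DMA address is returned unchanged.
 */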
1298
1299 /**
1300  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
1301  *
1302  * @params: see amdgpu_pte_update_params definition
1303  * @bo: PD/PT to update
1304  * @pe: kmap addr of the page entry
1305  * @addr: dst addr to write into pe
1306  * @count: number of page entries to update
1307  * @incr: increase next addr by incr bytes
1308  * @flags: hw access flags
1309  *
1310  * Write count number of PT/PD entries directly.
1311  */
1312 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
1313                                    struct amdgpu_bo *bo,
1314                                    uint64_t pe, uint64_t addr,
1315                                    unsigned count, uint32_t incr,
1316                                    uint64_t flags)
1317 {
1318         unsigned int i;
1319         uint64_t value;
1320
1321         pe += (unsigned long)amdgpu_bo_kptr(bo);
1322
1323         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1324
1325         for (i = 0; i < count; i++) {
1326                 value = params->pages_addr ?
1327                         amdgpu_vm_map_gart(params->pages_addr, addr) :
1328                         addr;
1329                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1330                                        i, value, flags);
1331                 addr += incr;
1332         }
1333 }
1334
1335 /**
1336  * amdgpu_vm_update_func - helper to call update function
1337  *
1338  * Calls the update function for both the given BO and its shadow.
1339  */
1340 static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
1341                                   struct amdgpu_bo *bo,
1342                                   uint64_t pe, uint64_t addr,
1343                                   unsigned count, uint32_t incr,
1344                                   uint64_t flags)
1345 {
1346         if (bo->shadow)
1347                 params->func(params, bo->shadow, pe, addr, count, incr, flags);
1348         params->func(params, bo, pe, addr, count, incr, flags);
1349 }
1350
1351 /**
1352  * amdgpu_vm_update_pde - update a single level in the hierarchy
1353  *
1354  * @params: parameters for the update
1355  * @vm: requested vm
1356  * @parent: parent directory
1357  * @entry: entry to update
1358  *
1359  * Makes sure the requested entry in parent is up to date.
1360  */
1361 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1362                                  struct amdgpu_vm *vm,
1363                                  struct amdgpu_vm_pt *parent,
1364                                  struct amdgpu_vm_pt *entry)
1365 {
1366         struct amdgpu_bo *bo = parent->base.bo, *pbo;
1367         uint64_t pde, pt, flags;
1368         unsigned level;
1369
1370         /* Don't update huge pages here */
1371         if (entry->huge)
1372                 return;
1373
1374         for (level = 0, pbo = bo->parent; pbo; ++level)
1375                 pbo = pbo->parent;
1376
1377         level += params->adev->vm_manager.root_level;
1378         amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1379         pde = (entry - parent->entries) * 8;
1380         amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
1381 }
1382
1383 /**
1384  * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1385  *
1386  * @adev: amdgpu_device pointer
1387  * @vm: related vm
1388  *
1389  * Mark all PD levels as invalid after an error.
1390  */
1391 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1392                                      struct amdgpu_vm *vm)
1393 {
1394         struct amdgpu_vm_pt_cursor cursor;
1395         struct amdgpu_vm_pt *entry;
1396
1397         for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
1398                 if (entry->base.bo && !entry->base.moved)
1399                         amdgpu_vm_bo_relocated(&entry->base);
1400 }
1401
1402 /**
1403  * amdgpu_vm_update_directories - make sure that all directories are valid
1404  *
1405  * @adev: amdgpu_device pointer
1406  * @vm: requested vm
1407  *
1408  * Makes sure all directories are up to date.
1409  *
1410  * Returns:
1411  * 0 for success, error for failure.
1412  */
1413 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1414                                  struct amdgpu_vm *vm)
1415 {
1416         struct amdgpu_pte_update_params params;
1417         struct amdgpu_job *job;
1418         unsigned ndw = 0;
1419         int r = 0;
1420
1421         if (list_empty(&vm->relocated))
1422                 return 0;
1423
1424 restart:
1425         memset(&params, 0, sizeof(params));
1426         params.adev = adev;
1427
1428         if (vm->use_cpu_for_update) {
1429                 r = amdgpu_bo_sync_wait(vm->root.base.bo,
1430                                         AMDGPU_FENCE_OWNER_VM, true);
1431                 if (unlikely(r))
1432                         return r;
1433
1434                 params.func = amdgpu_vm_cpu_set_ptes;
1435         } else {
1436                 ndw = 512 * 8;
1437                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1438                 if (r)
1439                         return r;
1440
1441                 params.ib = &job->ibs[0];
1442                 params.func = amdgpu_vm_do_set_ptes;
1443         }
1444
1445         while (!list_empty(&vm->relocated)) {
1446                 struct amdgpu_vm_pt *pt, *entry;
1447
1448                 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1449                                          base.vm_status);
1450                 amdgpu_vm_bo_idle(&entry->base);
1451
1452                 pt = amdgpu_vm_pt_parent(entry);
1453                 if (!pt)
1454                         continue;
1455
1456                 amdgpu_vm_update_pde(&params, vm, pt, entry);
1457
1458                 if (!vm->use_cpu_for_update &&
1459                     (ndw - params.ib->length_dw) < 32)
1460                         break;
1461         }
1462
1463         if (vm->use_cpu_for_update) {
1464                 /* Flush HDP */
1465                 mb();
1466                 amdgpu_asic_flush_hdp(adev, NULL);
1467         } else if (params.ib->length_dw == 0) {
1468                 amdgpu_job_free(job);
1469         } else {
1470                 struct amdgpu_bo *root = vm->root.base.bo;
1471                 struct amdgpu_ring *ring;
1472                 struct dma_fence *fence;
1473
1474                 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1475                                     sched);
1476
1477                 amdgpu_ring_pad_ib(ring, params.ib);
1478                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1479                                  AMDGPU_FENCE_OWNER_VM, false);
1480                 WARN_ON(params.ib->length_dw > ndw);
1481                 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1482                                       &fence);
1483                 if (r)
1484                         goto error;
1485
1486                 amdgpu_bo_fence(root, fence, true);
1487                 dma_fence_put(vm->last_update);
1488                 vm->last_update = fence;
1489         }
1490
1491         if (!list_empty(&vm->relocated))
1492                 goto restart;
1493
1494         return 0;
1495
1496 error:
1497         amdgpu_vm_invalidate_pds(adev, vm);
1498         amdgpu_job_free(job);
1499         return r;
1500 }
1501
1502 /**
1503  * amdgpu_vm_update_flags - figure out flags for PTE updates
1504  *
1505  * Make sure to set the right flags for the PTEs at the desired level.
1506  */
1507 static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
1508                                    struct amdgpu_bo *bo, unsigned level,
1509                                    uint64_t pe, uint64_t addr,
1510                                    unsigned count, uint32_t incr,
1511                                    uint64_t flags)
1512
1513 {
1514         if (level != AMDGPU_VM_PTB) {
1515                 flags |= AMDGPU_PDE_PTE;
1516                 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1517
1518         } else if (params->adev->asic_type >= CHIP_VEGA10 &&
1519                    !(flags & AMDGPU_PTE_VALID) &&
1520                    !(flags & AMDGPU_PTE_PRT)) {
1521
1522                 /* Workaround for fault priority problem on GMC9 */
1523                 flags |= AMDGPU_PTE_EXECUTABLE;
1524         }
1525
1526         amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
1527 }
1528
1529 /**
1530  * amdgpu_vm_fragment - get fragment for PTEs
1531  *
1532  * @params: see amdgpu_pte_update_params definition
1533  * @start: first PTE to handle
1534  * @end: last PTE to handle
1535  * @flags: hw mapping flags
1536  * @frag: resulting fragment size
1537  * @frag_end: end of this fragment
1538  *
1539  * Returns the first possible fragment for the start and end address.
1540  */
1541 static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
1542                                uint64_t start, uint64_t end, uint64_t flags,
1543                                unsigned int *frag, uint64_t *frag_end)
1544 {
1545         /**
1546          * The MC L1 TLB supports variable sized pages, based on a fragment
1547          * field in the PTE. When this field is set to a non-zero value, page
1548          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1549          * flags are considered valid for all PTEs within the fragment range
1550          * and corresponding mappings are assumed to be physically contiguous.
1551          *
1552          * The L1 TLB can store a single PTE for the whole fragment,
1553          * significantly increasing the space available for translation
1554          * caching. This leads to large improvements in throughput when the
1555          * TLB is under pressure.
1556          *
1557          * The L2 TLB distributes small and large fragments into two
1558          * asymmetric partitions. The large fragment cache is significantly
1559          * larger. Thus, we try to use large fragments wherever possible.
1560          * Userspace can support this by aligning virtual base address and
1561          * allocation size to the fragment size.
1562          *
1563          * Starting with Vega10 the fragment size only controls the L1. The L2
1564          * is now directly fed with small/huge/giant pages from the walker.
1565          */
1566         unsigned max_frag;
1567
1568         if (params->adev->asic_type < CHIP_VEGA10)
1569                 max_frag = params->adev->vm_manager.fragment_size;
1570         else
1571                 max_frag = 31;
1572
1573         /* system pages are not physically contiguous */
1574         if (params->src) {
1575                 *frag = 0;
1576                 *frag_end = end;
1577                 return;
1578         }
1579
1580         /* This intentionally wraps around if no bit is set */
1581         *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1582         if (*frag >= max_frag) {
1583                 *frag = max_frag;
1584                 *frag_end = end & ~((1ULL << max_frag) - 1);
1585         } else {
1586                 *frag_end = start + (1 << *frag);
1587         }
1588 }
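/*
 * Worked example (illustrative): for start = 0x30 and end = 0x100 with a large
 * enough max_frag, ffs(0x30) - 1 = 4 and fls64(0xd0) - 1 = 7, so the first
 * fragment uses frag = 4 and frag_end = 0x30 + (1 << 4) = 0x40; the remaining
 * range is covered by subsequent calls from amdgpu_vm_update_ptes().
 */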
1589
1590 /**
1591  * amdgpu_vm_update_ptes - make sure that page tables are valid
1592  *
1593  * @params: see amdgpu_pte_update_params definition
1594  * @start: start of GPU address range
1595  * @end: end of GPU address range
1596  * @dst: destination address to map to, the next dst inside the function
1597  * @flags: mapping flags
1598  *
1599  * Update the page tables in the range @start - @end.
1600  *
1601  * Returns:
1602  * 0 for success, -EINVAL for failure.
1603  */
1604 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1605                                  uint64_t start, uint64_t end,
1606                                  uint64_t dst, uint64_t flags)
1607 {
1608         struct amdgpu_device *adev = params->adev;
1609         struct amdgpu_vm_pt_cursor cursor;
1610         uint64_t frag_start = start, frag_end;
1611         unsigned int frag;
1612
1613         /* figure out the initial fragment */
1614         amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1615
1616         /* walk over the address space and update the PTs */
1617         amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1618         while (cursor.pfn < end) {
1619                 struct amdgpu_bo *pt = cursor.entry->base.bo;
1620                 unsigned shift, parent_shift, mask;
1621                 uint64_t incr, entry_end, pe_start;
1622
1623                 if (!pt)
1624                         return -ENOENT;
1625
1626                 /* The root level can't be a huge page */
1627                 if (cursor.level == adev->vm_manager.root_level) {
1628                         if (!amdgpu_vm_pt_descendant(adev, &cursor))
1629                                 return -ENOENT;
1630                         continue;
1631                 }
1632
1633                 /* If the entry was handled as a huge page before, revert that now */
1634                 if (cursor.entry->huge) {
1635                         /* Add the entry to the relocated list to update it. */
1636                         cursor.entry->huge = false;
1637                         amdgpu_vm_bo_relocated(&cursor.entry->base);
1638                 }
1639
1640                 shift = amdgpu_vm_level_shift(adev, cursor.level);
1641                 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1642                 if (adev->asic_type < CHIP_VEGA10) {
1643                         /* No huge page support before GMC v9 */
1644                         if (cursor.level != AMDGPU_VM_PTB) {
1645                                 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1646                                         return -ENOENT;
1647                                 continue;
1648                         }
1649                 } else if (frag < shift) {
1650                         /* We can't use this level when the fragment size is
1651                          * smaller than the address shift. Go to the next
1652                          * child entry and try again.
1653                          */
1654                         if (!amdgpu_vm_pt_descendant(adev, &cursor))
1655                                 return -ENOENT;
1656                         continue;
1657                 } else if (frag >= parent_shift &&
1658                            cursor.level - 1 != adev->vm_manager.root_level) {
1659                         /* If the fragment size is even larger than the parent
1660                          * shift we should go up one level and check it again
1661                          * unless one level up is the root level.
1662                          */
1663                         if (!amdgpu_vm_pt_ancestor(&cursor))
1664                                 return -ENOENT;
1665                         continue;
1666                 }
1667
1668                 /* Looks good so far, calculate parameters for the update */
1669                 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1670                 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1671                 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1672                 entry_end = (uint64_t)(mask + 1) << shift;
1673                 entry_end += cursor.pfn & ~(entry_end - 1);
1674                 entry_end = min(entry_end, end);
1675
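                /* Illustrative numbers: at the PTB level shift is 0 and, with
                 * the default 9-bit block size, mask is 511; pe_start is then
                 * the byte offset of the first PTE inside the 512-entry page
                 * table and entry_end is where that table's coverage (or the
                 * requested range) ends.
                 */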
1676                 do {
1677                         uint64_t upd_end = min(entry_end, frag_end);
1678                         unsigned nptes = (upd_end - frag_start) >> shift;
1679
1680                         amdgpu_vm_update_flags(params, pt, cursor.level,
1681                                                pe_start, dst, nptes, incr,
1682                                                flags | AMDGPU_PTE_FRAG(frag));
1683
1684                         pe_start += nptes * 8;
1685                         dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1686
1687                         frag_start = upd_end;
1688                         if (frag_start >= frag_end) {
1689                                 /* figure out the next fragment */
1690                                 amdgpu_vm_fragment(params, frag_start, end,
1691                                                    flags, &frag, &frag_end);
1692                                 if (frag < shift)
1693                                         break;
1694                         }
1695                 } while (frag_start < entry_end);
1696
1697                 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1698                         /* Mark all child entries as huge */
1699                         while (cursor.pfn < frag_start) {
1700                                 cursor.entry->huge = true;
1701                                 amdgpu_vm_pt_next(adev, &cursor);
1702                         }
1703
1704                 } else if (frag >= shift) {
1705                         /* or just move on to the next on the same level. */
1706                         amdgpu_vm_pt_next(adev, &cursor);
1707                 }
1708         }
1709
1710         return 0;
1711 }
1712
1713 /**
1714  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1715  *
1716  * @adev: amdgpu_device pointer
1717  * @exclusive: fence we need to sync to
1718  * @pages_addr: DMA addresses to use for mapping
1719  * @vm: requested vm
1720  * @start: start of mapped range
1721  * @last: last mapped entry
1722  * @flags: flags for the entries
1723  * @addr: addr to set the area to
1724  * @fence: optional resulting fence
1725  *
1726  * Fill in the page table entries between @start and @last.
1727  *
1728  * Returns:
1729  * 0 for success, -EINVAL for failure.
1730  */
1731 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1732                                        struct dma_fence *exclusive,
1733                                        dma_addr_t *pages_addr,
1734                                        struct amdgpu_vm *vm,
1735                                        uint64_t start, uint64_t last,
1736                                        uint64_t flags, uint64_t addr,
1737                                        struct dma_fence **fence)
1738 {
1739         struct amdgpu_ring *ring;
1740         void *owner = AMDGPU_FENCE_OWNER_VM;
1741         unsigned nptes, ncmds, ndw;
1742         struct amdgpu_job *job;
1743         struct amdgpu_pte_update_params params;
1744         struct dma_fence *f = NULL;
1745         int r;
1746
1747         memset(&params, 0, sizeof(params));
1748         params.adev = adev;
1749         params.vm = vm;
1750
1751         /* sync to everything except eviction fences on unmapping */
1752         if (!(flags & AMDGPU_PTE_VALID))
1753                 owner = AMDGPU_FENCE_OWNER_KFD;
1754
1755         if (vm->use_cpu_for_update) {
1756                 /* params.src is used as a flag to indicate system memory */
1757                 if (pages_addr)
1758                         params.src = ~0;
1759
1760                 /* Wait for PT BOs to be idle. PTs share the same resv. object
1761                  * as the root PD BO
1762                  */
1763                 r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
1764                 if (unlikely(r))
1765                         return r;
1766
1767                 /* Wait for any BO move to be completed */
1768                 if (exclusive) {
1769                         r = dma_fence_wait(exclusive, true);
1770                         if (unlikely(r))
1771                                 return r;
1772                 }
1773
1774                 params.func = amdgpu_vm_cpu_set_ptes;
1775                 params.pages_addr = pages_addr;
1776                 return amdgpu_vm_update_ptes(&params, start, last + 1,
1777                                              addr, flags);
1778         }
1779
1780         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1781
1782         nptes = last - start + 1;
1783
1784         /*
1785          * reserve space for two commands every (1 << BLOCK_SIZE)
1786          *  entries or 2k dwords (whatever is smaller)
1787          */
1788         ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1789
1790         /* The second command is for the shadow pagetables. */
1791         if (vm->root.base.bo->shadow)
1792                 ncmds *= 2;
1793
1794         /* padding, etc. */
1795         ndw = 64;
1796
1797         if (pages_addr) {
1798                 /* copy commands needed */
1799                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1800
1801                 /* and also PTEs */
1802                 ndw += nptes * 2;
1803
1804                 params.func = amdgpu_vm_do_copy_ptes;
1805
1806         } else {
1807                 /* set page commands needed */
1808                 ndw += ncmds * 10;
1809
1810                 /* extra commands for begin/end fragments */
1811                 ncmds = 2 * adev->vm_manager.fragment_size;
1812                 if (vm->root.base.bo->shadow)
1813                         ncmds *= 2;
1814
1815                 ndw += 10 * ncmds;
1816
1817                 params.func = amdgpu_vm_do_set_ptes;
1818         }
1819
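        /* ndw is a rough upper bound in dwords: roughly 10 dwords are assumed
         * per SDMA set-PTE command (the copy path instead uses copy_pte_num_dw
         * per command plus two dwords per PTE). The IB below is allocated with
         * ndw * 4 bytes and the WARN_ON() after the update checks the estimate.
         */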
1820         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1821         if (r)
1822                 return r;
1823
1824         params.ib = &job->ibs[0];
1825
1826         if (pages_addr) {
1827                 uint64_t *pte;
1828                 unsigned i;
1829
1830                 /* Put the PTEs at the end of the IB. */
1831                 i = ndw - nptes * 2;
1832                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1833                 params.src = job->ibs->gpu_addr + i * 4;
1834
1835                 for (i = 0; i < nptes; ++i) {
1836                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1837                                                     AMDGPU_GPU_PAGE_SIZE);
1838                         pte[i] |= flags;
1839                 }
1840                 addr = 0;
1841         }
1842
1843         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1844         if (r)
1845                 goto error_free;
1846
1847         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1848                              owner, false);
1849         if (r)
1850                 goto error_free;
1851
1852         r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1853         if (r)
1854                 goto error_free;
1855
1856         amdgpu_ring_pad_ib(ring, params.ib);
1857         WARN_ON(params.ib->length_dw > ndw);
1858         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1859         if (r)
1860                 goto error_free;
1861
1862         amdgpu_bo_fence(vm->root.base.bo, f, true);
1863         dma_fence_put(*fence);
1864         *fence = f;
1865         return 0;
1866
1867 error_free:
1868         amdgpu_job_free(job);
1869         return r;
1870 }
1871
1872 /**
1873  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1874  *
1875  * @adev: amdgpu_device pointer
1876  * @exclusive: fence we need to sync to
1877  * @pages_addr: DMA addresses to use for mapping
1878  * @vm: requested vm
1879  * @mapping: mapped range and flags to use for the update
1880  * @flags: HW flags for the mapping
1881  * @nodes: array of drm_mm_nodes with the MC addresses
1882  * @fence: optional resulting fence
1883  *
1884  * Split the mapping into smaller chunks so that each update fits
1885  * into a SDMA IB.
1886  *
1887  * Returns:
1888  * 0 for success, -EINVAL for failure.
1889  */
1890 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1891                                       struct dma_fence *exclusive,
1892                                       dma_addr_t *pages_addr,
1893                                       struct amdgpu_vm *vm,
1894                                       struct amdgpu_bo_va_mapping *mapping,
1895                                       uint64_t flags,
1896                                       struct drm_mm_node *nodes,
1897                                       struct dma_fence **fence)
1898 {
1899         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1900         uint64_t pfn, start = mapping->start;
1901         int r;
1902
1903         /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
1904          * here, but we filter the flags again just in case.
1905          */
1906         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1907                 flags &= ~AMDGPU_PTE_READABLE;
1908         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1909                 flags &= ~AMDGPU_PTE_WRITEABLE;
1910
1911         flags &= ~AMDGPU_PTE_EXECUTABLE;
1912         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1913
1914         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1915         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1916
1917         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1918             (adev->asic_type >= CHIP_VEGA10)) {
1919                 flags |= AMDGPU_PTE_PRT;
1920                 flags &= ~AMDGPU_PTE_VALID;
1921         }
1922
1923         trace_amdgpu_vm_bo_update(mapping);
1924
1925         pfn = mapping->offset >> PAGE_SHIFT;
1926         if (nodes) {
1927                 while (pfn >= nodes->size) {
1928                         pfn -= nodes->size;
1929                         ++nodes;
1930                 }
1931         }
1932
1933         do {
1934                 dma_addr_t *dma_addr = NULL;
1935                 uint64_t max_entries;
1936                 uint64_t addr, last;
1937
1938                 if (nodes) {
1939                         addr = nodes->start << PAGE_SHIFT;
1940                         max_entries = (nodes->size - pfn) *
1941                                 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1942                 } else {
1943                         addr = 0;
1944                         max_entries = S64_MAX;
1945                 }
1946
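                /* For system memory, scan pages_addr for a physically
                 * contiguous run of CPU pages. Runs shorter than a fragment
                 * fall back to per-page copy updates, longer runs are mapped
                 * directly so they can benefit from PTE fragments.
                 */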
1947                 if (pages_addr) {
1948                         uint64_t count;
1949
1950                         max_entries = min(max_entries, 16ull * 1024ull);
1951                         for (count = 1;
1952                              count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1953                              ++count) {
1954                                 uint64_t idx = pfn + count;
1955
1956                                 if (pages_addr[idx] !=
1957                                     (pages_addr[idx - 1] + PAGE_SIZE))
1958                                         break;
1959                         }
1960
1961                         if (count < min_linear_pages) {
1962                                 addr = pfn << PAGE_SHIFT;
1963                                 dma_addr = pages_addr;
1964                         } else {
1965                                 addr = pages_addr[pfn];
1966                                 max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1967                         }
1968
1969                 } else if (flags & AMDGPU_PTE_VALID) {
1970                         addr += adev->vm_manager.vram_base_offset;
1971                         addr += pfn << PAGE_SHIFT;
1972                 }
1973
1974                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1975                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1976                                                 start, last, flags, addr,
1977                                                 fence);
1978                 if (r)
1979                         return r;
1980
1981                 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1982                 if (nodes && nodes->size == pfn) {
1983                         pfn = 0;
1984                         ++nodes;
1985                 }
1986                 start = last + 1;
1987
1988         } while (unlikely(start != mapping->last + 1));
1989
1990         return 0;
1991 }
1992
1993 /**
1994  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1995  *
1996  * @adev: amdgpu_device pointer
1997  * @bo_va: requested BO and VM object
1998  * @clear: if true clear the entries
1999  *
2000  * Fill in the page table entries for @bo_va.
2001  *
2002  * Returns:
2003  * 0 for success, -EINVAL for failure.
2004  */
2005 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2006                         struct amdgpu_bo_va *bo_va,
2007                         bool clear)
2008 {
2009         struct amdgpu_bo *bo = bo_va->base.bo;
2010         struct amdgpu_vm *vm = bo_va->base.vm;
2011         struct amdgpu_bo_va_mapping *mapping;
2012         dma_addr_t *pages_addr = NULL;
2013         struct ttm_mem_reg *mem;
2014         struct drm_mm_node *nodes;
2015         struct dma_fence *exclusive, **last_update;
2016         uint64_t flags;
2017         int r;
2018
2019         if (clear || !bo) {
2020                 mem = NULL;
2021                 nodes = NULL;
2022                 exclusive = NULL;
2023         } else {
2024                 struct ttm_dma_tt *ttm;
2025
2026                 mem = &bo->tbo.mem;
2027                 nodes = mem->mm_node;
2028                 if (mem->mem_type == TTM_PL_TT) {
2029                         ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
2030                         pages_addr = ttm->dma_address;
2031                 }
2032                 exclusive = reservation_object_get_excl(bo->tbo.resv);
2033         }
2034
2035         if (bo)
2036                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
2037         else
2038                 flags = 0x0;
2039
2040         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
2041                 last_update = &vm->last_update;
2042         else
2043                 last_update = &bo_va->last_pt_update;
2044
2045         if (!clear && bo_va->base.moved) {
2046                 bo_va->base.moved = false;
2047                 list_splice_init(&bo_va->valids, &bo_va->invalids);
2048
2049         } else if (bo_va->cleared != clear) {
2050                 list_splice_init(&bo_va->valids, &bo_va->invalids);
2051         }
2052
2053         list_for_each_entry(mapping, &bo_va->invalids, list) {
2054                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
2055                                                mapping, flags, nodes,
2056                                                last_update);
2057                 if (r)
2058                         return r;
2059         }
2060
2061         if (vm->use_cpu_for_update) {
2062                 /* Flush HDP */
2063                 mb();
2064                 amdgpu_asic_flush_hdp(adev, NULL);
2065         }
2066
2067         /* If the BO is not in its preferred location add it back to
2068          * the evicted list so that it gets validated again on the
2069          * next command submission.
2070          */
2071         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2072                 uint32_t mem_type = bo->tbo.mem.mem_type;
2073
2074                 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
2075                         amdgpu_vm_bo_evicted(&bo_va->base);
2076                 else
2077                         amdgpu_vm_bo_idle(&bo_va->base);
2078         } else {
2079                 amdgpu_vm_bo_done(&bo_va->base);
2080         }
2081
2082         list_splice_init(&bo_va->invalids, &bo_va->valids);
2083         bo_va->cleared = clear;
2084
2085         if (trace_amdgpu_vm_bo_mapping_enabled()) {
2086                 list_for_each_entry(mapping, &bo_va->valids, list)
2087                         trace_amdgpu_vm_bo_mapping(mapping);
2088         }
2089
2090         return 0;
2091 }
2092
2093 /**
2094  * amdgpu_vm_update_prt_state - update the global PRT state
2095  *
2096  * @adev: amdgpu_device pointer
2097  */
2098 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
2099 {
2100         unsigned long flags;
2101         bool enable;
2102
2103         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
2104         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
2105         adev->gmc.gmc_funcs->set_prt(adev, enable);
2106         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
2107 }
2108
2109 /**
2110  * amdgpu_vm_prt_get - add a PRT user
2111  *
2112  * @adev: amdgpu_device pointer
2113  */
2114 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
2115 {
2116         if (!adev->gmc.gmc_funcs->set_prt)
2117                 return;
2118
2119         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
2120                 amdgpu_vm_update_prt_state(adev);
2121 }
2122
2123 /**
2124  * amdgpu_vm_prt_put - drop a PRT user
2125  *
2126  * @adev: amdgpu_device pointer
2127  */
2128 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
2129 {
2130         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
2131                 amdgpu_vm_update_prt_state(adev);
2132 }
2133
2134 /**
2135  * amdgpu_vm_prt_cb - callback for updating the PRT status
2136  *
2137  * @fence: fence for the callback
2138  * @_cb: the callback function
2139  */
2140 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
2141 {
2142         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
2143
2144         amdgpu_vm_prt_put(cb->adev);
2145         kfree(cb);
2146 }
2147
2148 /**
2149  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
2150  *
2151  * @adev: amdgpu_device pointer
2152  * @fence: fence for the callback
2153  */
2154 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
2155                                  struct dma_fence *fence)
2156 {
2157         struct amdgpu_prt_cb *cb;
2158
2159         if (!adev->gmc.gmc_funcs->set_prt)
2160                 return;
2161
2162         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
2163         if (!cb) {
2164                 /* Last resort when we are OOM */
2165                 if (fence)
2166                         dma_fence_wait(fence, false);
2167
2168                 amdgpu_vm_prt_put(adev);
2169         } else {
2170                 cb->adev = adev;
2171                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
2172                                                      amdgpu_vm_prt_cb))
2173                         amdgpu_vm_prt_cb(fence, &cb->cb);
2174         }
2175 }
2176
2177 /**
2178  * amdgpu_vm_free_mapping - free a mapping
2179  *
2180  * @adev: amdgpu_device pointer
2181  * @vm: requested vm
2182  * @mapping: mapping to be freed
2183  * @fence: fence of the unmap operation
2184  *
2185  * Free a mapping and make sure we decrease the PRT usage count if applicable.
2186  */
2187 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
2188                                    struct amdgpu_vm *vm,
2189                                    struct amdgpu_bo_va_mapping *mapping,
2190                                    struct dma_fence *fence)
2191 {
2192         if (mapping->flags & AMDGPU_PTE_PRT)
2193                 amdgpu_vm_add_prt_cb(adev, fence);
2194         kfree(mapping);
2195 }
2196
2197 /**
2198  * amdgpu_vm_prt_fini - finish all prt mappings
2199  *
2200  * @adev: amdgpu_device pointer
2201  * @vm: requested vm
2202  *
2203  * Register a cleanup callback to disable PRT support after VM dies.
2204  */
2205 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2206 {
2207         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
2208         struct dma_fence *excl, **shared;
2209         unsigned i, shared_count;
2210         int r;
2211
2212         r = reservation_object_get_fences_rcu(resv, &excl,
2213                                               &shared_count, &shared);
2214         if (r) {
2215                 /* Not enough memory to grab the fence list, as last resort
2216                  * block for all the fences to complete.
2217                  */
2218                 reservation_object_wait_timeout_rcu(resv, true, false,
2219                                                     MAX_SCHEDULE_TIMEOUT);
2220                 return;
2221         }
2222
2223         /* Add a callback for each fence in the reservation object */
2224         amdgpu_vm_prt_get(adev);
2225         amdgpu_vm_add_prt_cb(adev, excl);
2226
2227         for (i = 0; i < shared_count; ++i) {
2228                 amdgpu_vm_prt_get(adev);
2229                 amdgpu_vm_add_prt_cb(adev, shared[i]);
2230         }
2231
2232         kfree(shared);
2233 }
2234
2235 /**
2236  * amdgpu_vm_clear_freed - clear freed BOs in the PT
2237  *
2238  * @adev: amdgpu_device pointer
2239  * @vm: requested vm
2240  * @fence: optional resulting fence (unchanged if no work needed to be done
2241  * or if an error occurred)
2242  *
2243  * Make sure all freed BOs are cleared in the PT.
2244  * PTs have to be reserved and mutex must be locked!
2245  *
2246  * Returns:
2247  * 0 for success.
2248  *
2249  */
2250 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2251                           struct amdgpu_vm *vm,
2252                           struct dma_fence **fence)
2253 {
2254         struct amdgpu_bo_va_mapping *mapping;
2255         uint64_t init_pte_value = 0;
2256         struct dma_fence *f = NULL;
2257         int r;
2258
2259         while (!list_empty(&vm->freed)) {
2260                 mapping = list_first_entry(&vm->freed,
2261                         struct amdgpu_bo_va_mapping, list);
2262                 list_del(&mapping->list);
2263
2264                 if (vm->pte_support_ats &&
2265                     mapping->start < AMDGPU_GMC_HOLE_START)
2266                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2267
2268                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
2269                                                 mapping->start, mapping->last,
2270                                                 init_pte_value, 0, &f);
2271                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2272                 if (r) {
2273                         dma_fence_put(f);
2274                         return r;
2275                 }
2276         }
2277
2278         if (fence && f) {
2279                 dma_fence_put(*fence);
2280                 *fence = f;
2281         } else {
2282                 dma_fence_put(f);
2283         }
2284
2285         return 0;
2286
2287 }
2288
2289 /**
2290  * amdgpu_vm_handle_moved - handle moved BOs in the PT
2291  *
2292  * @adev: amdgpu_device pointer
2293  * @vm: requested vm
2294  *
2295  * Make sure all BOs which are moved are updated in the PTs.
2296  *
2297  * Returns:
2298  * 0 for success.
2299  *
2300  * PTs have to be reserved!
2301  */
2302 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2303                            struct amdgpu_vm *vm)
2304 {
2305         struct amdgpu_bo_va *bo_va, *tmp;
2306         struct reservation_object *resv;
2307         bool clear;
2308         int r;
2309
2310         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2311                 /* Per VM BOs never need to be cleared in the page tables */
2312                 r = amdgpu_vm_bo_update(adev, bo_va, false);
2313                 if (r)
2314                         return r;
2315         }
2316
2317         spin_lock(&vm->invalidated_lock);
2318         while (!list_empty(&vm->invalidated)) {
2319                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2320                                          base.vm_status);
2321                 resv = bo_va->base.bo->tbo.resv;
2322                 spin_unlock(&vm->invalidated_lock);
2323
2324                 /* Try to reserve the BO to avoid clearing its ptes */
2325                 if (!amdgpu_vm_debug && reservation_object_trylock(resv))
2326                         clear = false;
2327                 /* Somebody else is using the BO right now */
2328                 else
2329                         clear = true;
2330
2331                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2332                 if (r)
2333                         return r;
2334
2335                 if (!clear)
2336                         reservation_object_unlock(resv);
2337                 spin_lock(&vm->invalidated_lock);
2338         }
2339         spin_unlock(&vm->invalidated_lock);
2340
2341         return 0;
2342 }
2343
2344 /**
2345  * amdgpu_vm_bo_add - add a bo to a specific vm
2346  *
2347  * @adev: amdgpu_device pointer
2348  * @vm: requested vm
2349  * @bo: amdgpu buffer object
2350  *
2351  * Add @bo into the requested vm and to the list of bos
2352  * associated with the vm.
2353  *
2354  * Returns:
2355  * Newly added bo_va or NULL for failure
2356  *
2357  * Object has to be reserved!
2358  */
2359 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2360                                       struct amdgpu_vm *vm,
2361                                       struct amdgpu_bo *bo)
2362 {
2363         struct amdgpu_bo_va *bo_va;
2364
2365         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2366         if (bo_va == NULL) {
2367                 return NULL;
2368         }
2369         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2370
2371         bo_va->ref_count = 1;
2372         INIT_LIST_HEAD(&bo_va->valids);
2373         INIT_LIST_HEAD(&bo_va->invalids);
2374
2375         return bo_va;
2376 }
2377
2378
2379 /**
2380  * amdgpu_vm_bo_insert_map - insert a new mapping
2381  *
2382  * @adev: amdgpu_device pointer
2383  * @bo_va: bo_va to store the address
2384  * @mapping: the mapping to insert
2385  *
2386  * Insert a new mapping into all structures.
2387  */
2388 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2389                                     struct amdgpu_bo_va *bo_va,
2390                                     struct amdgpu_bo_va_mapping *mapping)
2391 {
2392         struct amdgpu_vm *vm = bo_va->base.vm;
2393         struct amdgpu_bo *bo = bo_va->base.bo;
2394
2395         mapping->bo_va = bo_va;
2396         list_add(&mapping->list, &bo_va->invalids);
2397         amdgpu_vm_it_insert(mapping, &vm->va);
2398
2399         if (mapping->flags & AMDGPU_PTE_PRT)
2400                 amdgpu_vm_prt_get(adev);
2401
2402         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2403             !bo_va->base.moved) {
2404                 list_move(&bo_va->base.vm_status, &vm->moved);
2405         }
2406         trace_amdgpu_vm_bo_map(bo_va, mapping);
2407 }
2408
2409 /**
2410  * amdgpu_vm_bo_map - map bo inside a vm
2411  *
2412  * @adev: amdgpu_device pointer
2413  * @bo_va: bo_va to store the address
2414  * @saddr: where to map the BO
2415  * @offset: requested offset in the BO
2416  * @size: BO size in bytes
2417  * @flags: attributes of pages (read/write/valid/etc.)
2418  *
2419  * Add a mapping of the BO at the specified addr into the VM.
2420  *
2421  * Returns:
2422  * 0 for success, error for failure.
2423  *
2424  * Object has to be reserved and unreserved outside!
2425  */
2426 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2427                      struct amdgpu_bo_va *bo_va,
2428                      uint64_t saddr, uint64_t offset,
2429                      uint64_t size, uint64_t flags)
2430 {
2431         struct amdgpu_bo_va_mapping *mapping, *tmp;
2432         struct amdgpu_bo *bo = bo_va->base.bo;
2433         struct amdgpu_vm *vm = bo_va->base.vm;
2434         uint64_t eaddr;
2435
2436         /* validate the parameters */
2437         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2438             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2439                 return -EINVAL;
2440
2441         /* make sure object fit at this offset */
2442         eaddr = saddr + size - 1;
2443         if (saddr >= eaddr ||
2444             (bo && offset + size > amdgpu_bo_size(bo)))
2445                 return -EINVAL;
2446
2447         saddr /= AMDGPU_GPU_PAGE_SIZE;
2448         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2449
2450         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2451         if (tmp) {
2452                 /* bo and tmp overlap, invalid addr */
2453                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2454                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2455                         tmp->start, tmp->last + 1);
2456                 return -EINVAL;
2457         }
2458
2459         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2460         if (!mapping)
2461                 return -ENOMEM;
2462
2463         mapping->start = saddr;
2464         mapping->last = eaddr;
2465         mapping->offset = offset;
2466         mapping->flags = flags;
2467
2468         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2469
2470         return 0;
2471 }
2472
2473 /**
2474  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2475  *
2476  * @adev: amdgpu_device pointer
2477  * @bo_va: bo_va to store the address
2478  * @saddr: where to map the BO
2479  * @offset: requested offset in the BO
2480  * @size: BO size in bytes
2481  * @flags: attributes of pages (read/write/valid/etc.)
2482  *
2483  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2484  * mappings as we do so.
2485  *
2486  * Returns:
2487  * 0 for success, error for failure.
2488  *
2489  * Object has to be reserved and unreserved outside!
2490  */
2491 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2492                              struct amdgpu_bo_va *bo_va,
2493                              uint64_t saddr, uint64_t offset,
2494                              uint64_t size, uint64_t flags)
2495 {
2496         struct amdgpu_bo_va_mapping *mapping;
2497         struct amdgpu_bo *bo = bo_va->base.bo;
2498         uint64_t eaddr;
2499         int r;
2500
2501         /* validate the parameters */
2502         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2503             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2504                 return -EINVAL;
2505
2506         /* make sure object fit at this offset */
2507         eaddr = saddr + size - 1;
2508         if (saddr >= eaddr ||
2509             (bo && offset + size > amdgpu_bo_size(bo)))
2510                 return -EINVAL;
2511
2512         /* Allocate all the needed memory */
2513         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2514         if (!mapping)
2515                 return -ENOMEM;
2516
2517         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2518         if (r) {
2519                 kfree(mapping);
2520                 return r;
2521         }
2522
2523         saddr /= AMDGPU_GPU_PAGE_SIZE;
2524         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2525
2526         mapping->start = saddr;
2527         mapping->last = eaddr;
2528         mapping->offset = offset;
2529         mapping->flags = flags;
2530
2531         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2532
2533         return 0;
2534 }
2535
2536 /**
2537  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2538  *
2539  * @adev: amdgpu_device pointer
2540  * @bo_va: bo_va to remove the address from
2541  * @saddr: where the BO is mapped
2542  *
2543  * Remove a mapping of the BO at the specified addr from the VM.
2544  *
2545  * Returns:
2546  * 0 for success, error for failure.
2547  *
2548  * Object has to be reserved and unreserved outside!
2549  */
2550 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2551                        struct amdgpu_bo_va *bo_va,
2552                        uint64_t saddr)
2553 {
2554         struct amdgpu_bo_va_mapping *mapping;
2555         struct amdgpu_vm *vm = bo_va->base.vm;
2556         bool valid = true;
2557
2558         saddr /= AMDGPU_GPU_PAGE_SIZE;
2559
2560         list_for_each_entry(mapping, &bo_va->valids, list) {
2561                 if (mapping->start == saddr)
2562                         break;
2563         }
2564
2565         if (&mapping->list == &bo_va->valids) {
2566                 valid = false;
2567
2568                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2569                         if (mapping->start == saddr)
2570                                 break;
2571                 }
2572
2573                 if (&mapping->list == &bo_va->invalids)
2574                         return -ENOENT;
2575         }
2576
2577         list_del(&mapping->list);
2578         amdgpu_vm_it_remove(mapping, &vm->va);
2579         mapping->bo_va = NULL;
2580         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2581
2582         if (valid)
2583                 list_add(&mapping->list, &vm->freed);
2584         else
2585                 amdgpu_vm_free_mapping(adev, vm, mapping,
2586                                        bo_va->last_pt_update);
2587
2588         return 0;
2589 }
2590
2591 /**
2592  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2593  *
2594  * @adev: amdgpu_device pointer
2595  * @vm: VM structure to use
2596  * @saddr: start of the range
2597  * @size: size of the range
2598  *
2599  * Remove all mappings in a range, split them as appropriate.
2600  *
2601  * Returns:
2602  * 0 for success, error for failure.
2603  */
2604 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2605                                 struct amdgpu_vm *vm,
2606                                 uint64_t saddr, uint64_t size)
2607 {
2608         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2609         LIST_HEAD(removed);
2610         uint64_t eaddr;
2611
2612         eaddr = saddr + size - 1;
2613         saddr /= AMDGPU_GPU_PAGE_SIZE;
2614         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2615
2616         /* Allocate all the needed memory */
2617         before = kzalloc(sizeof(*before), GFP_KERNEL);
2618         if (!before)
2619                 return -ENOMEM;
2620         INIT_LIST_HEAD(&before->list);
2621
2622         after = kzalloc(sizeof(*after), GFP_KERNEL);
2623         if (!after) {
2624                 kfree(before);
2625                 return -ENOMEM;
2626         }
2627         INIT_LIST_HEAD(&after->list);
2628
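        /* Example (illustrative): clearing [0x140, 0x17f] out of an existing
         * mapping [0x100, 0x1ff] puts the overlapping part on the freed list
         * and re-inserts "before" as [0x100, 0x13f] and "after" as
         * [0x180, 0x1ff], with the offset of "after" adjusted accordingly.
         */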
2629         /* Now gather all removed mappings */
2630         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2631         while (tmp) {
2632                 /* Remember mapping split at the start */
2633                 if (tmp->start < saddr) {
2634                         before->start = tmp->start;
2635                         before->last = saddr - 1;
2636                         before->offset = tmp->offset;
2637                         before->flags = tmp->flags;
2638                         before->bo_va = tmp->bo_va;
2639                         list_add(&before->list, &tmp->bo_va->invalids);
2640                 }
2641
2642                 /* Remember mapping split at the end */
2643                 if (tmp->last > eaddr) {
2644                         after->start = eaddr + 1;
2645                         after->last = tmp->last;
2646                         after->offset = tmp->offset;
2647                         after->offset += after->start - tmp->start;
2648                         after->flags = tmp->flags;
2649                         after->bo_va = tmp->bo_va;
2650                         list_add(&after->list, &tmp->bo_va->invalids);
2651                 }
2652
2653                 list_del(&tmp->list);
2654                 list_add(&tmp->list, &removed);
2655
2656                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2657         }
2658
2659         /* And free them up */
2660         list_for_each_entry_safe(tmp, next, &removed, list) {
2661                 amdgpu_vm_it_remove(tmp, &vm->va);
2662                 list_del(&tmp->list);
2663
2664                 if (tmp->start < saddr)
2665                         tmp->start = saddr;
2666                 if (tmp->last > eaddr)
2667                         tmp->last = eaddr;
2668
2669                 tmp->bo_va = NULL;
2670                 list_add(&tmp->list, &vm->freed);
2671                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2672         }
2673
2674         /* Insert partial mapping before the range */
2675         if (!list_empty(&before->list)) {
2676                 amdgpu_vm_it_insert(before, &vm->va);
2677                 if (before->flags & AMDGPU_PTE_PRT)
2678                         amdgpu_vm_prt_get(adev);
2679         } else {
2680                 kfree(before);
2681         }
2682
2683         /* Insert partial mapping after the range */
2684         if (!list_empty(&after->list)) {
2685                 amdgpu_vm_it_insert(after, &vm->va);
2686                 if (after->flags & AMDGPU_PTE_PRT)
2687                         amdgpu_vm_prt_get(adev);
2688         } else {
2689                 kfree(after);
2690         }
2691
2692         return 0;
2693 }
2694
2695 /**
2696  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2697  *
2698  * @vm: the requested VM
2699  * @addr: the address
2700  *
2701  * Find a mapping by its address.
2702  *
2703  * Returns:
2704  * The amdgpu_bo_va_mapping matching for addr or NULL
2705  *
2706  */
2707 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2708                                                          uint64_t addr)
2709 {
2710         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2711 }
2712
2713 /**
2714  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2715  *
2716  * @vm: the requested vm
2717  * @ticket: CS ticket
2718  *
2719  * Trace all mappings of BOs reserved during a command submission.
2720  */
2721 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2722 {
2723         struct amdgpu_bo_va_mapping *mapping;
2724
2725         if (!trace_amdgpu_vm_bo_cs_enabled())
2726                 return;
2727
2728         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2729              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2730                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2731                         struct amdgpu_bo *bo;
2732
2733                         bo = mapping->bo_va->base.bo;
2734                         if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2735                                 continue;
2736                 }
2737
2738                 trace_amdgpu_vm_bo_cs(mapping);
2739         }
2740 }
2741
2742 /**
2743  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2744  *
2745  * @adev: amdgpu_device pointer
2746  * @bo_va: requested bo_va
2747  *
2748  * Remove @bo_va->bo from the requested vm.
2749  *
2750  * Object has to be reserved!
2751  */
2752 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2753                       struct amdgpu_bo_va *bo_va)
2754 {
2755         struct amdgpu_bo_va_mapping *mapping, *next;
2756         struct amdgpu_bo *bo = bo_va->base.bo;
2757         struct amdgpu_vm *vm = bo_va->base.vm;
2758         struct amdgpu_vm_bo_base **base;
2759
2760         if (bo) {
2761                 if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2762                         vm->bulk_moveable = false;
2763
2764                 for (base = &bo_va->base.bo->vm_bo; *base;
2765                      base = &(*base)->next) {
2766                         if (*base != &bo_va->base)
2767                                 continue;
2768
2769                         *base = bo_va->base.next;
2770                         break;
2771                 }
2772         }
2773
2774         spin_lock(&vm->invalidated_lock);
2775         list_del(&bo_va->base.vm_status);
2776         spin_unlock(&vm->invalidated_lock);
2777
2778         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2779                 list_del(&mapping->list);
2780                 amdgpu_vm_it_remove(mapping, &vm->va);
2781                 mapping->bo_va = NULL;
2782                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2783                 list_add(&mapping->list, &vm->freed);
2784         }
2785         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2786                 list_del(&mapping->list);
2787                 amdgpu_vm_it_remove(mapping, &vm->va);
2788                 amdgpu_vm_free_mapping(adev, vm, mapping,
2789                                        bo_va->last_pt_update);
2790         }
2791
2792         dma_fence_put(bo_va->last_pt_update);
2793         kfree(bo_va);
2794 }
2795
2796 /**
2797  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2798  *
2799  * @adev: amdgpu_device pointer
2800  * @bo: amdgpu buffer object
2801  * @evicted: is the BO evicted
2802  *
2803  * Mark @bo as invalid.
2804  */
2805 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2806                              struct amdgpu_bo *bo, bool evicted)
2807 {
2808         struct amdgpu_vm_bo_base *bo_base;
2809
2810         /* shadow bo doesn't have bo base, its validation needs its parent */
2811         if (bo->parent && bo->parent->shadow == bo)
2812                 bo = bo->parent;
2813
2814         for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2815                 struct amdgpu_vm *vm = bo_base->vm;
2816
2817                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2818                         amdgpu_vm_bo_evicted(bo_base);
2819                         continue;
2820                 }
2821
2822                 if (bo_base->moved)
2823                         continue;
2824                 bo_base->moved = true;
2825
2826                 if (bo->tbo.type == ttm_bo_type_kernel)
2827                         amdgpu_vm_bo_relocated(bo_base);
2828                 else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2829                         amdgpu_vm_bo_moved(bo_base);
2830                 else
2831                         amdgpu_vm_bo_invalidated(bo_base);
2832         }
2833 }
2834
2835 /**
2836  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2837  *
2838  * @vm_size: VM size
2839  *
2840  * Returns:
2841  * VM page table as power of two
2842  */
2843 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2844 {
2845         /* Total bits covered by PD + PTs */
2846         unsigned bits = ilog2(vm_size) + 18;
2847
2848         /* Make sure the PD is 4K in size up to 8GB address space.
2849            Above that, split equally between PD and PTs */
2850         if (vm_size <= 8)
2851                 return (bits - 9);
2852         else
2853                 return ((bits + 3) / 2);
2854 }
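/*
 * Worked example (illustrative): for an 8 GB VM, bits = ilog2(8) + 18 = 21 and
 * the returned block size is 21 - 9 = 12, i.e. a 512-entry (4KB) page
 * directory where each page table covers 2^12 GPU pages; above 8 GB the bits
 * are split roughly equally between PD and PTs.
 */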
2855
2856 /**
2857  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2858  *
2859  * @adev: amdgpu_device pointer
2860  * @min_vm_size: the minimum vm size in GB if it's set auto
2861  * @fragment_size_default: Default PTE fragment size
2862  * @max_level: max VMPT level
2863  * @max_bits: max address space size in bits
2864  *
2865  */
2866 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2867                            uint32_t fragment_size_default, unsigned max_level,
2868                            unsigned max_bits)
2869 {
2870         unsigned int max_size = 1 << (max_bits - 30);
2871         unsigned int vm_size;
2872         uint64_t tmp;
2873
2874         /* adjust vm size first */
2875         if (amdgpu_vm_size != -1) {
2876                 vm_size = amdgpu_vm_size;
2877                 if (vm_size > max_size) {
2878                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2879                                  amdgpu_vm_size, max_size);
2880                         vm_size = max_size;
2881                 }
2882         } else {
2883                 struct sysinfo si;
2884                 unsigned int phys_ram_gb;
2885
2886                 /* Optimal VM size depends on the amount of physical
2887                  * RAM available. Underlying requirements and
2888                  * assumptions:
2889                  *
2890                  *  - Need to map system memory and VRAM from all GPUs
2891                  *     - VRAM from other GPUs not known here
2892                  *     - Assume VRAM <= system memory
2893                  *  - On GFX8 and older, VM space can be segmented for
2894                  *    different MTYPEs
2895                  *  - Need to allow room for fragmentation, guard pages etc.
2896                  *
2897                  * This adds up to a rough guess of system memory x3.
2898                  * Round up to power of two to maximize the available
2899                  * VM size with the given page table size.
2900                  */
2901                 si_meminfo(&si);
2902                 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2903                                (1 << 30) - 1) >> 30;
2904                 vm_size = roundup_pow_of_two(
2905                         min(max(phys_ram_gb * 3, min_vm_size), max_size));
2906         }
2907
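        /* vm_size is in GB and one GB holds 1 << 18 GPU pages of 4KB,
         * hence the shift by 18 below.
         */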
2908         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2909
2910         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2911         if (amdgpu_vm_block_size != -1)
2912                 tmp >>= amdgpu_vm_block_size - 9;
2913         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2914         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2915         switch (adev->vm_manager.num_level) {
2916         case 3:
2917                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2918                 break;
2919         case 2:
2920                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2921                 break;
2922         case 1:
2923                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2924                 break;
2925         default:
2926                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2927         }
2928         /* block size depends on vm size and hw setup */
2929         if (amdgpu_vm_block_size != -1)
2930                 adev->vm_manager.block_size =
2931                         min((unsigned)amdgpu_vm_block_size, max_bits
2932                             - AMDGPU_GPU_PAGE_SHIFT
2933                             - 9 * adev->vm_manager.num_level);
2934         else if (adev->vm_manager.num_level > 1)
2935                 adev->vm_manager.block_size = 9;
2936         else
2937                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2938
2939         if (amdgpu_vm_fragment_size == -1)
2940                 adev->vm_manager.fragment_size = fragment_size_default;
2941         else
2942                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2943
2944         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2945                  vm_size, adev->vm_manager.num_level + 1,
2946                  adev->vm_manager.block_size,
2947                  adev->vm_manager.fragment_size);
2948 }
2949
2950 static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
2951 {
2952         struct amdgpu_retryfault_hashtable *fault_hash;
2953
2954         fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
2955         if (!fault_hash)
2956                 return fault_hash;
2957
2958         INIT_CHASH_TABLE(fault_hash->hash,
2959                         AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
2960         spin_lock_init(&fault_hash->lock);
2961         fault_hash->count = 0;
2962
2963         return fault_hash;
2964 }
2965
2966 /**
2967  * amdgpu_vm_init - initialize a vm instance
2968  *
2969  * @adev: amdgpu_device pointer
2970  * @vm: requested vm
2971  * @vm_context: Indicates if it is a GFX or Compute context
2972  * @pasid: Process address space identifier
2973  *
2974  * Init @vm fields.
2975  *
2976  * Returns:
2977  * 0 for success, error for failure.
2978  */
2979 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2980                    int vm_context, unsigned int pasid)
2981 {
2982         struct amdgpu_bo_param bp;
2983         struct amdgpu_bo *root;
2984         int r, i;
2985
2986         vm->va = RB_ROOT_CACHED;
2987         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2988                 vm->reserved_vmid[i] = NULL;
2989         INIT_LIST_HEAD(&vm->evicted);
2990         INIT_LIST_HEAD(&vm->relocated);
2991         INIT_LIST_HEAD(&vm->moved);
2992         INIT_LIST_HEAD(&vm->idle);
2993         INIT_LIST_HEAD(&vm->invalidated);
2994         spin_lock_init(&vm->invalidated_lock);
2995         INIT_LIST_HEAD(&vm->freed);
2996
2997         /* create scheduler entity for page table updates */
2998         r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
2999                                   adev->vm_manager.vm_pte_num_rqs, NULL);
3000         if (r)
3001                 return r;
3002
3003         vm->pte_support_ats = false;
3004
3005         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
3006                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3007                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3008
3009                 if (adev->asic_type == CHIP_RAVEN)
3010                         vm->pte_support_ats = true;
3011         } else {
3012                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3013                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
3014         }
3015         DRM_DEBUG_DRIVER("VM update mode is %s\n",
3016                          vm->use_cpu_for_update ? "CPU" : "SDMA");
3017         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3018                   "CPU update of VM recommended only for large BAR system\n");
3019         vm->last_update = NULL;
3020
3021         amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
3022         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
3023                 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
3024         r = amdgpu_bo_create(adev, &bp, &root);
3025         if (r)
3026                 goto error_free_sched_entity;
3027
3028         r = amdgpu_bo_reserve(root, true);
3029         if (r)
3030                 goto error_free_root;
3031
3032         r = reservation_object_reserve_shared(root->tbo.resv, 1);
3033         if (r)
3034                 goto error_unreserve;
3035
3036         amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
3037
3038         r = amdgpu_vm_clear_bo(adev, vm, root,
3039                                adev->vm_manager.root_level,
3040                                vm->pte_support_ats);
3041         if (r)
3042                 goto error_unreserve;
3043
3044         amdgpu_bo_unreserve(vm->root.base.bo);
3045
3046         if (pasid) {
3047                 unsigned long flags;
3048
3049                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3050                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3051                               GFP_ATOMIC);
3052                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3053                 if (r < 0)
3054                         goto error_free_root;
3055
3056                 vm->pasid = pasid;
3057         }
3058
3059         vm->fault_hash = init_fault_hash();
3060         if (!vm->fault_hash) {
3061                 r = -ENOMEM;
3062                 goto error_free_root;
3063         }
3064
3065         INIT_KFIFO(vm->faults);
3066
3067         return 0;
3068
3069 error_unreserve:
3070         amdgpu_bo_unreserve(vm->root.base.bo);
3071
3072 error_free_root:
3073         amdgpu_bo_unref(&vm->root.base.bo->shadow);
3074         amdgpu_bo_unref(&vm->root.base.bo);
3075         vm->root.base.bo = NULL;
3076
3077 error_free_sched_entity:
3078         drm_sched_entity_destroy(&vm->entity);
3079
3080         return r;
3081 }
3082
3083 /**
3084  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3085  *
3086  * @adev: amdgpu_device pointer
3087  * @vm: requested vm
3088  * @vm: requested vm
 * @pasid: Process address space identifier
3089  * This only works on GFX VMs that don't have any BOs added and no
3090  * page tables allocated yet.
3091  *
3092  * Changes the following VM parameters:
3093  * - use_cpu_for_update
3094  * - pte_supports_ats
3095  * - pasid (old PASID is released, because compute manages its own PASIDs)
3096  *
3097  * Reinitializes the page directory to reflect the changed ATS
3098  * setting.
3099  *
3100  * Returns:
3101  * 0 for success, -errno for errors.
3102  */
3103 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
3104 {
3105         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
3106         int r;
3107
3108         r = amdgpu_bo_reserve(vm->root.base.bo, true);
3109         if (r)
3110                 return r;
3111
3112         /* Sanity checks */
3113         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
3114                 r = -EINVAL;
3115                 goto unreserve_bo;
3116         }
3117
3118         if (pasid) {
3119                 unsigned long flags;
3120
3121                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3122                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3123                               GFP_ATOMIC);
3124                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3125
3126                 if (r == -ENOSPC)
3127                         goto unreserve_bo;
3128                 r = 0;
3129         }
3130
3131         /* Check if PD needs to be reinitialized and do it before
3132          * changing any other state, in case it fails.
3133          */
3134         if (pte_support_ats != vm->pte_support_ats) {
3135                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
3136                                adev->vm_manager.root_level,
3137                                pte_support_ats);
3138                 if (r)
3139                         goto free_idr;
3140         }
3141
3142         /* Update VM state */
3143         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3144                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3145         vm->pte_support_ats = pte_support_ats;
3146         DRM_DEBUG_DRIVER("VM update mode is %s\n",
3147                          vm->use_cpu_for_update ? "CPU" : "SDMA");
3148         WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3149                   "CPU update of VM recommended only for large BAR system\n");