Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_vm.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
44  * at any given time.  The VM page tables can contain a mix
45  * vram pages and system memory pages and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
49  * associated with each VMID.  When execting a command buffer,
50  * the kernel tells the the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
53  * sets up their pages tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63                      START, LAST, static, amdgpu_vm_it)
64
65 #undef START
66 #undef LAST
67
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76
77         /**
78          * @adev: amdgpu device we do this update for
79          */
80         struct amdgpu_device *adev;
81
82         /**
83          * @vm: optional amdgpu_vm we do this update for
84          */
85         struct amdgpu_vm *vm;
86
87         /**
88          * @src: address where to copy page table entries from
89          */
90         uint64_t src;
91
92         /**
93          * @ib: indirect buffer to fill with commands
94          */
95         struct amdgpu_ib *ib;
96
97         /**
98          * @func: Function which actually does the update
99          */
100         void (*func)(struct amdgpu_pte_update_params *params,
101                      struct amdgpu_bo *bo, uint64_t pe,
102                      uint64_t addr, unsigned count, uint32_t incr,
103                      uint64_t flags);
104         /**
105          * @pages_addr:
106          *
107          * DMA addresses to use for mapping, used during VM update by CPU
108          */
109         dma_addr_t *pages_addr;
110
111         /**
112          * @kptr:
113          *
114          * Kernel pointer of PD/PT BO that needs to be updated,
115          * used during VM update by CPU
116          */
117         void *kptr;
118 };
119
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124
125         /**
126          * @adev: amdgpu device
127          */
128         struct amdgpu_device *adev;
129
130         /**
131          * @cb: callback
132          */
133         struct dma_fence_cb cb;
134 };
135
136 /**
137  * amdgpu_vm_level_shift - return the addr shift for each level
138  *
139  * @adev: amdgpu_device pointer
140  * @level: VMPT level
141  *
142  * Returns:
143  * The number of bits the pfn needs to be right shifted for a level.
144  */
145 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
146                                       unsigned level)
147 {
148         unsigned shift = 0xff;
149
150         switch (level) {
151         case AMDGPU_VM_PDB2:
152         case AMDGPU_VM_PDB1:
153         case AMDGPU_VM_PDB0:
154                 shift = 9 * (AMDGPU_VM_PDB0 - level) +
155                         adev->vm_manager.block_size;
156                 break;
157         case AMDGPU_VM_PTB:
158                 shift = 0;
159                 break;
160         default:
161                 dev_err(adev->dev, "the level%d isn't supported.\n", level);
162         }
163
164         return shift;
165 }
166
167 /**
168  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
169  *
170  * @adev: amdgpu_device pointer
171  * @level: VMPT level
172  *
173  * Returns:
174  * The number of entries in a page directory or page table.
175  */
176 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
177                                       unsigned level)
178 {
179         unsigned shift = amdgpu_vm_level_shift(adev,
180                                                adev->vm_manager.root_level);
181
182         if (level == adev->vm_manager.root_level)
183                 /* For the root directory */
184                 return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
185         else if (level != AMDGPU_VM_PTB)
186                 /* Everything in between */
187                 return 512;
188         else
189                 /* For the page tables on the leaves */
190                 return AMDGPU_VM_PTE_COUNT(adev);
191 }
192
193 /**
194  * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
195  *
196  * @adev: amdgpu_device pointer
197  * @level: VMPT level
198  *
199  * Returns:
200  * The mask to extract the entry number of a PD/PT from an address.
201  */
202 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
203                                        unsigned int level)
204 {
205         if (level <= adev->vm_manager.root_level)
206                 return 0xffffffff;
207         else if (level != AMDGPU_VM_PTB)
208                 return 0x1ff;
209         else
210                 return AMDGPU_VM_PTE_COUNT(adev) - 1;
211 }
212
213 /**
214  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
215  *
216  * @adev: amdgpu_device pointer
217  * @level: VMPT level
218  *
219  * Returns:
220  * The size of the BO for a page directory or page table in bytes.
221  */
222 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
223 {
224         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
225 }
226
227 /**
228  * amdgpu_vm_bo_evicted - vm_bo is evicted
229  *
230  * @vm_bo: vm_bo which is evicted
231  *
232  * State for PDs/PTs and per VM BOs which are not at the location they should
233  * be.
234  */
235 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
236 {
237         struct amdgpu_vm *vm = vm_bo->vm;
238         struct amdgpu_bo *bo = vm_bo->bo;
239
240         vm_bo->moved = true;
241         if (bo->tbo.type == ttm_bo_type_kernel)
242                 list_move(&vm_bo->vm_status, &vm->evicted);
243         else
244                 list_move_tail(&vm_bo->vm_status, &vm->evicted);
245 }
246
247 /**
248  * amdgpu_vm_bo_relocated - vm_bo is reloacted
249  *
250  * @vm_bo: vm_bo which is relocated
251  *
252  * State for PDs/PTs which needs to update their parent PD.
253  */
254 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
255 {
256         list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
257 }
258
259 /**
260  * amdgpu_vm_bo_moved - vm_bo is moved
261  *
262  * @vm_bo: vm_bo which is moved
263  *
264  * State for per VM BOs which are moved, but that change is not yet reflected
265  * in the page tables.
266  */
267 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
268 {
269         list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
270 }
271
272 /**
273  * amdgpu_vm_bo_idle - vm_bo is idle
274  *
275  * @vm_bo: vm_bo which is now idle
276  *
277  * State for PDs/PTs and per VM BOs which have gone through the state machine
278  * and are now idle.
279  */
280 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
281 {
282         list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
283         vm_bo->moved = false;
284 }
285
286 /**
287  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
288  *
289  * @vm_bo: vm_bo which is now invalidated
290  *
291  * State for normal BOs which are invalidated and that change not yet reflected
292  * in the PTs.
293  */
294 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
295 {
296         spin_lock(&vm_bo->vm->invalidated_lock);
297         list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
298         spin_unlock(&vm_bo->vm->invalidated_lock);
299 }
300
301 /**
302  * amdgpu_vm_bo_done - vm_bo is done
303  *
304  * @vm_bo: vm_bo which is now done
305  *
306  * State for normal BOs which are invalidated and that change has been updated
307  * in the PTs.
308  */
309 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
310 {
311         spin_lock(&vm_bo->vm->invalidated_lock);
312         list_del_init(&vm_bo->vm_status);
313         spin_unlock(&vm_bo->vm->invalidated_lock);
314 }
315
316 /**
317  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
318  *
319  * @base: base structure for tracking BO usage in a VM
320  * @vm: vm to which bo is to be added
321  * @bo: amdgpu buffer object
322  *
323  * Initialize a bo_va_base structure and add it to the appropriate lists
324  *
325  */
326 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
327                                    struct amdgpu_vm *vm,
328                                    struct amdgpu_bo *bo)
329 {
330         base->vm = vm;
331         base->bo = bo;
332         base->next = NULL;
333         INIT_LIST_HEAD(&base->vm_status);
334
335         if (!bo)
336                 return;
337         base->next = bo->vm_bo;
338         bo->vm_bo = base;
339
340         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
341                 return;
342
343         vm->bulk_moveable = false;
344         if (bo->tbo.type == ttm_bo_type_kernel)
345                 amdgpu_vm_bo_relocated(base);
346         else
347                 amdgpu_vm_bo_idle(base);
348
349         if (bo->preferred_domains &
350             amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
351                 return;
352
353         /*
354          * we checked all the prerequisites, but it looks like this per vm bo
355          * is currently evicted. add the bo to the evicted list to make sure it
356          * is validated on next vm use to avoid fault.
357          * */
358         amdgpu_vm_bo_evicted(base);
359 }
360
361 /**
362  * amdgpu_vm_pt_parent - get the parent page directory
363  *
364  * @pt: child page table
365  *
366  * Helper to get the parent entry for the child page table. NULL if we are at
367  * the root page directory.
368  */
369 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
370 {
371         struct amdgpu_bo *parent = pt->base.bo->parent;
372
373         if (!parent)
374                 return NULL;
375
376         return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
377 }
378
379 /**
380  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
381  */
382 struct amdgpu_vm_pt_cursor {
383         uint64_t pfn;
384         struct amdgpu_vm_pt *parent;
385         struct amdgpu_vm_pt *entry;
386         unsigned level;
387 };
388
389 /**
390  * amdgpu_vm_pt_start - start PD/PT walk
391  *
392  * @adev: amdgpu_device pointer
393  * @vm: amdgpu_vm structure
394  * @start: start address of the walk
395  * @cursor: state to initialize
396  *
397  * Initialize a amdgpu_vm_pt_cursor to start a walk.
398  */
399 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
400                                struct amdgpu_vm *vm, uint64_t start,
401                                struct amdgpu_vm_pt_cursor *cursor)
402 {
403         cursor->pfn = start;
404         cursor->parent = NULL;
405         cursor->entry = &vm->root;
406         cursor->level = adev->vm_manager.root_level;
407 }
408
409 /**
410  * amdgpu_vm_pt_descendant - go to child node
411  *
412  * @adev: amdgpu_device pointer
413  * @cursor: current state
414  *
415  * Walk to the child node of the current node.
416  * Returns:
417  * True if the walk was possible, false otherwise.
418  */
419 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
420                                     struct amdgpu_vm_pt_cursor *cursor)
421 {
422         unsigned mask, shift, idx;
423
424         if (!cursor->entry->entries)
425                 return false;
426
427         BUG_ON(!cursor->entry->base.bo);
428         mask = amdgpu_vm_entries_mask(adev, cursor->level);
429         shift = amdgpu_vm_level_shift(adev, cursor->level);
430
431         ++cursor->level;
432         idx = (cursor->pfn >> shift) & mask;
433         cursor->parent = cursor->entry;
434         cursor->entry = &cursor->entry->entries[idx];
435         return true;
436 }
437
438 /**
439  * amdgpu_vm_pt_sibling - go to sibling node
440  *
441  * @adev: amdgpu_device pointer
442  * @cursor: current state
443  *
444  * Walk to the sibling node of the current node.
445  * Returns:
446  * True if the walk was possible, false otherwise.
447  */
448 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
449                                  struct amdgpu_vm_pt_cursor *cursor)
450 {
451         unsigned shift, num_entries;
452
453         /* Root doesn't have a sibling */
454         if (!cursor->parent)
455                 return false;
456
457         /* Go to our parents and see if we got a sibling */
458         shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
459         num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
460
461         if (cursor->entry == &cursor->parent->entries[num_entries - 1])
462                 return false;
463
464         cursor->pfn += 1ULL << shift;
465         cursor->pfn &= ~((1ULL << shift) - 1);
466         ++cursor->entry;
467         return true;
468 }
469
470 /**
471  * amdgpu_vm_pt_ancestor - go to parent node
472  *
473  * @cursor: current state
474  *
475  * Walk to the parent node of the current node.
476  * Returns:
477  * True if the walk was possible, false otherwise.
478  */
479 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
480 {
481         if (!cursor->parent)
482                 return false;
483
484         --cursor->level;
485         cursor->entry = cursor->parent;
486         cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
487         return true;
488 }
489
490 /**
491  * amdgpu_vm_pt_next - get next PD/PT in hieratchy
492  *
493  * @adev: amdgpu_device pointer
494  * @cursor: current state
495  *
496  * Walk the PD/PT tree to the next node.
497  */
498 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
499                               struct amdgpu_vm_pt_cursor *cursor)
500 {
501         /* First try a newborn child */
502         if (amdgpu_vm_pt_descendant(adev, cursor))
503                 return;
504
505         /* If that didn't worked try to find a sibling */
506         while (!amdgpu_vm_pt_sibling(adev, cursor)) {
507                 /* No sibling, go to our parents and grandparents */
508                 if (!amdgpu_vm_pt_ancestor(cursor)) {
509                         cursor->pfn = ~0ll;
510                         return;
511                 }
512         }
513 }
514
515 /**
516  * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
517  *
518  * @adev: amdgpu_device pointer
519  * @vm: amdgpu_vm structure
520  * @start: start addr of the walk
521  * @cursor: state to initialize
522  *
523  * Start a walk and go directly to the leaf node.
524  */
525 static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
526                                     struct amdgpu_vm *vm, uint64_t start,
527                                     struct amdgpu_vm_pt_cursor *cursor)
528 {
529         amdgpu_vm_pt_start(adev, vm, start, cursor);
530         while (amdgpu_vm_pt_descendant(adev, cursor));
531 }
532
533 /**
534  * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
535  *
536  * @adev: amdgpu_device pointer
537  * @cursor: current state
538  *
539  * Walk the PD/PT tree to the next leaf node.
540  */
541 static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
542                                    struct amdgpu_vm_pt_cursor *cursor)
543 {
544         amdgpu_vm_pt_next(adev, cursor);
545         if (cursor->pfn != ~0ll)
546                 while (amdgpu_vm_pt_descendant(adev, cursor));
547 }
548
549 /**
550  * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
551  */
552 #define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor)                \
553         for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor));         \
554              (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
555
556 /**
557  * amdgpu_vm_pt_first_dfs - start a deep first search
558  *
559  * @adev: amdgpu_device structure
560  * @vm: amdgpu_vm structure
561  * @cursor: state to initialize
562  *
563  * Starts a deep first traversal of the PD/PT tree.
564  */
565 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
566                                    struct amdgpu_vm *vm,
567                                    struct amdgpu_vm_pt_cursor *cursor)
568 {
569         amdgpu_vm_pt_start(adev, vm, 0, cursor);
570         while (amdgpu_vm_pt_descendant(adev, cursor));
571 }
572
573 /**
574  * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
575  *
576  * @adev: amdgpu_device structure
577  * @cursor: current state
578  *
579  * Move the cursor to the next node in a deep first search.
580  */
581 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
582                                   struct amdgpu_vm_pt_cursor *cursor)
583 {
584         if (!cursor->entry)
585                 return;
586
587         if (!cursor->parent)
588                 cursor->entry = NULL;
589         else if (amdgpu_vm_pt_sibling(adev, cursor))
590                 while (amdgpu_vm_pt_descendant(adev, cursor));
591         else
592                 amdgpu_vm_pt_ancestor(cursor);
593 }
594
595 /**
596  * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
597  */
598 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)                 \
599         for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)),                   \
600              (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
601              (entry); (entry) = (cursor).entry,                                 \
602              amdgpu_vm_pt_next_dfs((adev), &(cursor)))
603
604 /**
605  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
606  *
607  * @vm: vm providing the BOs
608  * @validated: head of validation list
609  * @entry: entry to add
610  *
611  * Add the page directory to the list of BOs to
612  * validate for command submission.
613  */
614 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
615                          struct list_head *validated,
616                          struct amdgpu_bo_list_entry *entry)
617 {
618         entry->priority = 0;
619         entry->tv.bo = &vm->root.base.bo->tbo;
620         /* One for the VM updates, one for TTM and one for the CS job */
621         entry->tv.num_shared = 3;
622         entry->user_pages = NULL;
623         list_add(&entry->tv.head, validated);
624 }
625
626 /**
627  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
628  *
629  * @adev: amdgpu device pointer
630  * @vm: vm providing the BOs
631  *
632  * Move all BOs to the end of LRU and remember their positions to put them
633  * together.
634  */
635 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
636                                 struct amdgpu_vm *vm)
637 {
638         struct ttm_bo_global *glob = adev->mman.bdev.glob;
639         struct amdgpu_vm_bo_base *bo_base;
640
641         if (vm->bulk_moveable) {
642                 spin_lock(&glob->lru_lock);
643                 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
644                 spin_unlock(&glob->lru_lock);
645                 return;
646         }
647
648         memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
649
650         spin_lock(&glob->lru_lock);
651         list_for_each_entry(bo_base, &vm->idle, vm_status) {
652                 struct amdgpu_bo *bo = bo_base->bo;
653
654                 if (!bo->parent)
655                         continue;
656
657                 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
658                 if (bo->shadow)
659                         ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
660                                                 &vm->lru_bulk_move);
661         }
662         spin_unlock(&glob->lru_lock);
663
664         vm->bulk_moveable = true;
665 }
666
667 /**
668  * amdgpu_vm_validate_pt_bos - validate the page table BOs
669  *
670  * @adev: amdgpu device pointer
671  * @vm: vm providing the BOs
672  * @validate: callback to do the validation
673  * @param: parameter for the validation callback
674  *
675  * Validate the page table BOs on command submission if neccessary.
676  *
677  * Returns:
678  * Validation result.
679  */
680 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
681                               int (*validate)(void *p, struct amdgpu_bo *bo),
682                               void *param)
683 {
684         struct amdgpu_vm_bo_base *bo_base, *tmp;
685         int r = 0;
686
687         vm->bulk_moveable &= list_empty(&vm->evicted);
688
689         list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
690                 struct amdgpu_bo *bo = bo_base->bo;
691
692                 r = validate(param, bo);
693                 if (r)
694                         break;
695
696                 if (bo->tbo.type != ttm_bo_type_kernel) {
697                         amdgpu_vm_bo_moved(bo_base);
698                 } else {
699                         if (vm->use_cpu_for_update)
700                                 r = amdgpu_bo_kmap(bo, NULL);
701                         else
702                                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
703                         if (r)
704                                 break;
705                         if (bo->shadow) {
706                                 r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
707                                 if (r)
708                                         break;
709                         }
710                         amdgpu_vm_bo_relocated(bo_base);
711                 }
712         }
713
714         return r;
715 }
716
717 /**
718  * amdgpu_vm_ready - check VM is ready for updates
719  *
720  * @vm: VM to check
721  *
722  * Check if all VM PDs/PTs are ready for updates
723  *
724  * Returns:
725  * True if eviction list is empty.
726  */
727 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
728 {
729         return list_empty(&vm->evicted);
730 }
731
732 /**
733  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
734  *
735  * @adev: amdgpu_device pointer
736  * @vm: VM to clear BO from
737  * @bo: BO to clear
738  * @level: level this BO is at
739  * @pte_support_ats: indicate ATS support from PTE
740  *
741  * Root PD needs to be reserved when calling this.
742  *
743  * Returns:
744  * 0 on success, errno otherwise.
745  */
746 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
747                               struct amdgpu_vm *vm, struct amdgpu_bo *bo,
748                               unsigned level, bool pte_support_ats)
749 {
750         struct ttm_operation_ctx ctx = { true, false };
751         struct dma_fence *fence = NULL;
752         unsigned entries, ats_entries;
753         struct amdgpu_ring *ring;
754         struct amdgpu_job *job;
755         uint64_t addr;
756         int r;
757
758         entries = amdgpu_bo_size(bo) / 8;
759
760         if (pte_support_ats) {
761                 if (level == adev->vm_manager.root_level) {
762                         ats_entries = amdgpu_vm_level_shift(adev, level);
763                         ats_entries += AMDGPU_GPU_PAGE_SHIFT;
764                         ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
765                         ats_entries = min(ats_entries, entries);
766                         entries -= ats_entries;
767                 } else {
768                         ats_entries = entries;
769                         entries = 0;
770                 }
771         } else {
772                 ats_entries = 0;
773         }
774
775         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
776
777         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
778         if (r)
779                 goto error;
780
781         r = amdgpu_ttm_alloc_gart(&bo->tbo);
782         if (r)
783                 return r;
784
785         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
786         if (r)
787                 goto error;
788
789         addr = amdgpu_bo_gpu_offset(bo);
790         if (ats_entries) {
791                 uint64_t ats_value;
792
793                 ats_value = AMDGPU_PTE_DEFAULT_ATC;
794                 if (level != AMDGPU_VM_PTB)
795                         ats_value |= AMDGPU_PDE_PTE;
796
797                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
798                                       ats_entries, 0, ats_value);
799                 addr += ats_entries * 8;
800         }
801
802         if (entries)
803                 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
804                                       entries, 0, 0);
805
806         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
807
808         WARN_ON(job->ibs[0].length_dw > 64);
809         r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
810                              AMDGPU_FENCE_OWNER_UNDEFINED, false);
811         if (r)
812                 goto error_free;
813
814         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
815                               &fence);
816         if (r)
817                 goto error_free;
818
819         amdgpu_bo_fence(bo, fence, true);
820         dma_fence_put(fence);
821
822         if (bo->shadow)
823                 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
824                                           level, pte_support_ats);
825
826         return 0;
827
828 error_free:
829         amdgpu_job_free(job);
830
831 error:
832         return r;
833 }
834
835 /**
836  * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
837  *
838  * @adev: amdgpu_device pointer
839  * @vm: requesting vm
840  * @bp: resulting BO allocation parameters
841  */
842 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
843                                int level, struct amdgpu_bo_param *bp)
844 {
845         memset(bp, 0, sizeof(*bp));
846
847         bp->size = amdgpu_vm_bo_size(adev, level);
848         bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
849         bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
850         bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
851         bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
852                 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
853         if (vm->use_cpu_for_update)
854                 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
855         else if (!vm->root.base.bo || vm->root.base.bo->shadow)
856                 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
857         bp->type = ttm_bo_type_kernel;
858         if (vm->root.base.bo)
859                 bp->resv = vm->root.base.bo->tbo.resv;
860 }
861
862 /**
863  * amdgpu_vm_alloc_pts - Allocate page tables.
864  *
865  * @adev: amdgpu_device pointer
866  * @vm: VM to allocate page tables for
867  * @saddr: Start address which needs to be allocated
868  * @size: Size from start address we need.
869  *
870  * Make sure the page directories and page tables are allocated
871  *
872  * Returns:
873  * 0 on success, errno otherwise.
874  */
875 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
876                         struct amdgpu_vm *vm,
877                         uint64_t saddr, uint64_t size)
878 {
879         struct amdgpu_vm_pt_cursor cursor;
880         struct amdgpu_bo *pt;
881         bool ats = false;
882         uint64_t eaddr;
883         int r;
884
885         /* validate the parameters */
886         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
887                 return -EINVAL;
888
889         eaddr = saddr + size - 1;
890
891         if (vm->pte_support_ats)
892                 ats = saddr < AMDGPU_GMC_HOLE_START;
893
894         saddr /= AMDGPU_GPU_PAGE_SIZE;
895         eaddr /= AMDGPU_GPU_PAGE_SIZE;
896
897         if (eaddr >= adev->vm_manager.max_pfn) {
898                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
899                         eaddr, adev->vm_manager.max_pfn);
900                 return -EINVAL;
901         }
902
903         for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
904                 struct amdgpu_vm_pt *entry = cursor.entry;
905                 struct amdgpu_bo_param bp;
906
907                 if (cursor.level < AMDGPU_VM_PTB) {
908                         unsigned num_entries;
909
910                         num_entries = amdgpu_vm_num_entries(adev, cursor.level);
911                         entry->entries = kvmalloc_array(num_entries,
912                                                         sizeof(*entry->entries),
913                                                         GFP_KERNEL |
914                                                         __GFP_ZERO);
915                         if (!entry->entries)
916                                 return -ENOMEM;
917                 }
918
919
920                 if (entry->base.bo)
921                         continue;
922
923                 amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
924
925                 r = amdgpu_bo_create(adev, &bp, &pt);
926                 if (r)
927                         return r;
928
929                 r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
930                 if (r)
931                         goto error_free_pt;
932
933                 if (vm->use_cpu_for_update) {
934                         r = amdgpu_bo_kmap(pt, NULL);
935                         if (r)
936                                 goto error_free_pt;
937                 }
938
939                 /* Keep a reference to the root directory to avoid
940                 * freeing them up in the wrong order.
941                 */
942                 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
943
944                 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
945         }
946
947         return 0;
948
949 error_free_pt:
950         amdgpu_bo_unref(&pt->shadow);
951         amdgpu_bo_unref(&pt);
952         return r;
953 }
954
955 /**
956  * amdgpu_vm_free_pts - free PD/PT levels
957  *
958  * @adev: amdgpu device structure
959  * @vm: amdgpu vm structure
960  *
961  * Free the page directory or page table level and all sub levels.
962  */
963 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
964                                struct amdgpu_vm *vm)
965 {
966         struct amdgpu_vm_pt_cursor cursor;
967         struct amdgpu_vm_pt *entry;
968
969         for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
970
971                 if (entry->base.bo) {
972                         entry->base.bo->vm_bo = NULL;
973                         list_del(&entry->base.vm_status);
974                         amdgpu_bo_unref(&entry->base.bo->shadow);
975                         amdgpu_bo_unref(&entry->base.bo);
976                 }
977                 kvfree(entry->entries);
978         }
979
980         BUG_ON(vm->root.base.bo);
981 }
982
983 /**
984  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
985  *
986  * @adev: amdgpu_device pointer
987  */
988 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
989 {
990         const struct amdgpu_ip_block *ip_block;
991         bool has_compute_vm_bug;
992         struct amdgpu_ring *ring;
993         int i;
994
995         has_compute_vm_bug = false;
996
997         ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
998         if (ip_block) {
999                 /* Compute has a VM bug for GFX version < 7.
1000                    Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
1001                 if (ip_block->version->major <= 7)
1002                         has_compute_vm_bug = true;
1003                 else if (ip_block->version->major == 8)
1004                         if (adev->gfx.mec_fw_version < 673)
1005                                 has_compute_vm_bug = true;
1006         }
1007
1008         for (i = 0; i < adev->num_rings; i++) {
1009                 ring = adev->rings[i];
1010                 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1011                         /* only compute rings */
1012                         ring->has_compute_vm_bug = has_compute_vm_bug;
1013                 else
1014                         ring->has_compute_vm_bug = false;
1015         }
1016 }
1017
1018 /**
1019  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1020  *
1021  * @ring: ring on which the job will be submitted
1022  * @job: job to submit
1023  *
1024  * Returns:
1025  * True if sync is needed.
1026  */
1027 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1028                                   struct amdgpu_job *job)
1029 {
1030         struct amdgpu_device *adev = ring->adev;
1031         unsigned vmhub = ring->funcs->vmhub;
1032         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1033         struct amdgpu_vmid *id;
1034         bool gds_switch_needed;
1035         bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1036
1037         if (job->vmid == 0)
1038                 return false;
1039         id = &id_mgr->ids[job->vmid];
1040         gds_switch_needed = ring->funcs->emit_gds_switch && (
1041                 id->gds_base != job->gds_base ||
1042                 id->gds_size != job->gds_size ||
1043                 id->gws_base != job->gws_base ||
1044                 id->gws_size != job->gws_size ||
1045                 id->oa_base != job->oa_base ||
1046                 id->oa_size != job->oa_size);
1047
1048         if (amdgpu_vmid_had_gpu_reset(adev, id))
1049                 return true;
1050
1051         return vm_flush_needed || gds_switch_needed;
1052 }
1053
1054 /**
1055  * amdgpu_vm_flush - hardware flush the vm
1056  *
1057  * @ring: ring to use for flush
1058  * @job:  related job
1059  * @need_pipe_sync: is pipe sync needed
1060  *
1061  * Emit a VM flush when it is necessary.
1062  *
1063  * Returns:
1064  * 0 on success, errno otherwise.
1065  */
1066 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
1067 {
1068         struct amdgpu_device *adev = ring->adev;
1069         unsigned vmhub = ring->funcs->vmhub;
1070         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1071         struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1072         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1073                 id->gds_base != job->gds_base ||
1074                 id->gds_size != job->gds_size ||
1075                 id->gws_base != job->gws_base ||
1076                 id->gws_size != job->gws_size ||
1077                 id->oa_base != job->oa_base ||
1078                 id->oa_size != job->oa_size);
1079         bool vm_flush_needed = job->vm_needs_flush;
1080         bool pasid_mapping_needed = id->pasid != job->pasid ||
1081                 !id->pasid_mapping ||
1082                 !dma_fence_is_signaled(id->pasid_mapping);
1083         struct dma_fence *fence = NULL;
1084         unsigned patch_offset = 0;
1085         int r;
1086
1087         if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1088                 gds_switch_needed = true;
1089                 vm_flush_needed = true;
1090                 pasid_mapping_needed = true;
1091         }
1092
1093         gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1094         vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
1095                         job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1096         pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1097                 ring->funcs->emit_wreg;
1098
1099         if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1100                 return 0;
1101
1102         if (ring->funcs->init_cond_exec)
1103                 patch_offset = amdgpu_ring_init_cond_exec(ring);
1104
1105         if (need_pipe_sync)
1106                 amdgpu_ring_emit_pipeline_sync(ring);
1107
1108         if (vm_flush_needed) {
1109                 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1110                 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1111         }
1112
1113         if (pasid_mapping_needed)
1114                 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1115
1116         if (vm_flush_needed || pasid_mapping_needed) {
1117                 r = amdgpu_fence_emit(ring, &fence, 0);
1118                 if (r)
1119                         return r;
1120         }
1121
1122         if (vm_flush_needed) {
1123                 mutex_lock(&id_mgr->lock);
1124                 dma_fence_put(id->last_flush);
1125                 id->last_flush = dma_fence_get(fence);
1126                 id->current_gpu_reset_count =
1127                         atomic_read(&adev->gpu_reset_counter);
1128                 mutex_unlock(&id_mgr->lock);
1129         }
1130
1131         if (pasid_mapping_needed) {
1132                 id->pasid = job->pasid;
1133                 dma_fence_put(id->pasid_mapping);
1134                 id->pasid_mapping = dma_fence_get(fence);
1135         }
1136         dma_fence_put(fence);
1137
1138         if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1139                 id->gds_base = job->gds_base;
1140                 id->gds_size = job->gds_size;
1141                 id->gws_base = job->gws_base;
1142                 id->gws_size = job->gws_size;
1143                 id->oa_base = job->oa_base;
1144                 id->oa_size = job->oa_size;
1145                 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1146                                             job->gds_size, job->gws_base,
1147                                             job->gws_size, job->oa_base,
1148                                             job->oa_size);
1149         }
1150
1151         if (ring->funcs->patch_cond_exec)
1152                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
1153
1154         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1155         if (ring->funcs->emit_switch_buffer) {
1156                 amdgpu_ring_emit_switch_buffer(ring);
1157                 amdgpu_ring_emit_switch_buffer(ring);
1158         }
1159         return 0;
1160 }
1161
1162 /**
1163  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1164  *
1165  * @vm: requested vm
1166  * @bo: requested buffer object
1167  *
1168  * Find @bo inside the requested vm.
1169  * Search inside the @bos vm list for the requested vm
1170  * Returns the found bo_va or NULL if none is found
1171  *
1172  * Object has to be reserved!
1173  *
1174  * Returns:
1175  * Found bo_va or NULL.
1176  */
1177 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1178                                        struct amdgpu_bo *bo)
1179 {
1180         struct amdgpu_vm_bo_base *base;
1181
1182         for (base = bo->vm_bo; base; base = base->next) {
1183                 if (base->vm != vm)
1184                         continue;
1185
1186                 return container_of(base, struct amdgpu_bo_va, base);
1187         }
1188         return NULL;
1189 }
1190
1191 /**
1192  * amdgpu_vm_do_set_ptes - helper to call the right asic function
1193  *
1194  * @params: see amdgpu_pte_update_params definition
1195  * @bo: PD/PT to update
1196  * @pe: addr of the page entry
1197  * @addr: dst addr to write into pe
1198  * @count: number of page entries to update
1199  * @incr: increase next addr by incr bytes
1200  * @flags: hw access flags
1201  *
1202  * Traces the parameters and calls the right asic functions
1203  * to setup the page table using the DMA.
1204  */
1205 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
1206                                   struct amdgpu_bo *bo,
1207                                   uint64_t pe, uint64_t addr,
1208                                   unsigned count, uint32_t incr,
1209                                   uint64_t flags)
1210 {
1211         pe += amdgpu_bo_gpu_offset(bo);
1212         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1213
1214         if (count < 3) {
1215                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
1216                                     addr | flags, count, incr);
1217
1218         } else {
1219                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
1220                                       count, incr, flags);
1221         }
1222 }
1223
1224 /**
1225  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
1226  *
1227  * @params: see amdgpu_pte_update_params definition
1228  * @bo: PD/PT to update
1229  * @pe: addr of the page entry
1230  * @addr: dst addr to write into pe
1231  * @count: number of page entries to update
1232  * @incr: increase next addr by incr bytes
1233  * @flags: hw access flags
1234  *
1235  * Traces the parameters and calls the DMA function to copy the PTEs.
1236  */
1237 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
1238                                    struct amdgpu_bo *bo,
1239                                    uint64_t pe, uint64_t addr,
1240                                    unsigned count, uint32_t incr,
1241                                    uint64_t flags)
1242 {
1243         uint64_t src = (params->src + (addr >> 12) * 8);
1244
1245         pe += amdgpu_bo_gpu_offset(bo);
1246         trace_amdgpu_vm_copy_ptes(pe, src, count);
1247
1248         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
1249 }
1250
1251 /**
1252  * amdgpu_vm_map_gart - Resolve gart mapping of addr
1253  *
1254  * @pages_addr: optional DMA address to use for lookup
1255  * @addr: the unmapped addr
1256  *
1257  * Look up the physical address of the page that the pte resolves
1258  * to.
1259  *
1260  * Returns:
1261  * The pointer for the page table entry.
1262  */
1263 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1264 {
1265         uint64_t result;
1266
1267         /* page table offset */
1268         result = pages_addr[addr >> PAGE_SHIFT];
1269
1270         /* in case cpu page size != gpu page size*/
1271         result |= addr & (~PAGE_MASK);
1272
1273         result &= 0xFFFFFFFFFFFFF000ULL;
1274
1275         return result;
1276 }
1277
1278 /**
1279  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
1280  *
1281  * @params: see amdgpu_pte_update_params definition
1282  * @bo: PD/PT to update
1283  * @pe: kmap addr of the page entry
1284  * @addr: dst addr to write into pe
1285  * @count: number of page entries to update
1286  * @incr: increase next addr by incr bytes
1287  * @flags: hw access flags
1288  *
1289  * Write count number of PT/PD entries directly.
1290  */
1291 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
1292                                    struct amdgpu_bo *bo,
1293                                    uint64_t pe, uint64_t addr,
1294                                    unsigned count, uint32_t incr,
1295                                    uint64_t flags)
1296 {
1297         unsigned int i;
1298         uint64_t value;
1299
1300         pe += (unsigned long)amdgpu_bo_kptr(bo);
1301
1302         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1303
1304         for (i = 0; i < count; i++) {
1305                 value = params->pages_addr ?
1306                         amdgpu_vm_map_gart(params->pages_addr, addr) :
1307                         addr;
1308                 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1309                                        i, value, flags);
1310                 addr += incr;
1311         }
1312 }
1313
1314
1315 /**
1316  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
1317  *
1318  * @adev: amdgpu_device pointer
1319  * @vm: related vm
1320  * @owner: fence owner
1321  *
1322  * Returns:
1323  * 0 on success, errno otherwise.
1324  */
1325 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1326                              void *owner)
1327 {
1328         struct amdgpu_sync sync;
1329         int r;
1330
1331         amdgpu_sync_create(&sync);
1332         amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1333         r = amdgpu_sync_wait(&sync, true);
1334         amdgpu_sync_free(&sync);
1335
1336         return r;
1337 }
1338
1339 /**
1340  * amdgpu_vm_update_func - helper to call update function
1341  *
1342  * Calls the update function for both the given BO as well as its shadow.
1343  */
1344 static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
1345                                   struct amdgpu_bo *bo,
1346                                   uint64_t pe, uint64_t addr,
1347                                   unsigned count, uint32_t incr,
1348                                   uint64_t flags)
1349 {
1350         if (bo->shadow)
1351                 params->func(params, bo->shadow, pe, addr, count, incr, flags);
1352         params->func(params, bo, pe, addr, count, incr, flags);
1353 }
1354
1355 /*
1356  * amdgpu_vm_update_pde - update a single level in the hierarchy
1357  *
1358  * @param: parameters for the update
1359  * @vm: requested vm
1360  * @parent: parent directory
1361  * @entry: entry to update
1362  *
1363  * Makes sure the requested entry in parent is up to date.
1364  */
1365 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1366                                  struct amdgpu_vm *vm,
1367                                  struct amdgpu_vm_pt *parent,
1368                                  struct amdgpu_vm_pt *entry)
1369 {
1370         struct amdgpu_bo *bo = parent->base.bo, *pbo;
1371         uint64_t pde, pt, flags;
1372         unsigned level;
1373
1374         /* Don't update huge pages here */
1375         if (entry->huge)
1376                 return;
1377
1378         for (level = 0, pbo = bo->parent; pbo; ++level)
1379                 pbo = pbo->parent;
1380
1381         level += params->adev->vm_manager.root_level;
1382         amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1383         pde = (entry - parent->entries) * 8;
1384         amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
1385 }
1386
1387 /*
1388  * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1389  *
1390  * @adev: amdgpu_device pointer
1391  * @vm: related vm
1392  *
1393  * Mark all PD level as invalid after an error.
1394  */
1395 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1396                                      struct amdgpu_vm *vm)
1397 {
1398         struct amdgpu_vm_pt_cursor cursor;
1399         struct amdgpu_vm_pt *entry;
1400
1401         for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
1402                 if (entry->base.bo && !entry->base.moved)
1403                         amdgpu_vm_bo_relocated(&entry->base);
1404 }
1405
1406 /*
1407  * amdgpu_vm_update_directories - make sure that all directories are valid
1408  *
1409  * @adev: amdgpu_device pointer
1410  * @vm: requested vm
1411  *
1412  * Makes sure all directories are up to date.
1413  *
1414  * Returns:
1415  * 0 for success, error for failure.
1416  */
1417 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1418                                  struct amdgpu_vm *vm)
1419 {
1420         struct amdgpu_pte_update_params params;
1421         struct amdgpu_job *job;
1422         unsigned ndw = 0;
1423         int r = 0;
1424
1425         if (list_empty(&vm->relocated))
1426                 return 0;
1427
1428 restart:
1429         memset(&params, 0, sizeof(params));
1430         params.adev = adev;
1431
1432         if (vm->use_cpu_for_update) {
1433                 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1434                 if (unlikely(r))
1435                         return r;
1436
1437                 params.func = amdgpu_vm_cpu_set_ptes;
1438         } else {
1439                 ndw = 512 * 8;
1440                 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1441                 if (r)
1442                         return r;
1443
1444                 params.ib = &job->ibs[0];
1445                 params.func = amdgpu_vm_do_set_ptes;
1446         }
1447
1448         while (!list_empty(&vm->relocated)) {
1449                 struct amdgpu_vm_pt *pt, *entry;
1450
1451                 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1452                                          base.vm_status);
1453                 amdgpu_vm_bo_idle(&entry->base);
1454
1455                 pt = amdgpu_vm_pt_parent(entry);
1456                 if (!pt)
1457                         continue;
1458
1459                 amdgpu_vm_update_pde(&params, vm, pt, entry);
1460
1461                 if (!vm->use_cpu_for_update &&
1462                     (ndw - params.ib->length_dw) < 32)
1463                         break;
1464         }
1465
1466         if (vm->use_cpu_for_update) {
1467                 /* Flush HDP */
1468                 mb();
1469                 amdgpu_asic_flush_hdp(adev, NULL);
1470         } else if (params.ib->length_dw == 0) {
1471                 amdgpu_job_free(job);
1472         } else {
1473                 struct amdgpu_bo *root = vm->root.base.bo;
1474                 struct amdgpu_ring *ring;
1475                 struct dma_fence *fence;
1476
1477                 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1478                                     sched);
1479
1480                 amdgpu_ring_pad_ib(ring, params.ib);
1481                 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1482                                  AMDGPU_FENCE_OWNER_VM, false);
1483                 WARN_ON(params.ib->length_dw > ndw);
1484                 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1485                                       &fence);
1486                 if (r)
1487                         goto error;
1488
1489                 amdgpu_bo_fence(root, fence, true);
1490                 dma_fence_put(vm->last_update);
1491                 vm->last_update = fence;
1492         }
1493
1494         if (!list_empty(&vm->relocated))
1495                 goto restart;
1496
1497         return 0;
1498
1499 error:
1500         amdgpu_vm_invalidate_pds(adev, vm);
1501         amdgpu_job_free(job);
1502         return r;
1503 }
1504
1505 /**
1506  * amdgpu_vm_update_huge - figure out parameters for PTE updates
1507  *
1508  * Make sure to set the right flags for the PTEs at the desired level.
1509  */
1510 static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params,
1511                                   struct amdgpu_bo *bo, unsigned level,
1512                                   uint64_t pe, uint64_t addr,
1513                                   unsigned count, uint32_t incr,
1514                                   uint64_t flags)
1515
1516 {
1517         if (level != AMDGPU_VM_PTB) {
1518                 flags |= AMDGPU_PDE_PTE;
1519                 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1520         }
1521
1522         amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
1523 }
1524
1525 /**
1526  * amdgpu_vm_fragment - get fragment for PTEs
1527  *
1528  * @params: see amdgpu_pte_update_params definition
1529  * @start: first PTE to handle
1530  * @end: last PTE to handle
1531  * @flags: hw mapping flags
1532  * @frag: resulting fragment size
1533  * @frag_end: end of this fragment
1534  *
1535  * Returns the first possible fragment for the start and end address.
1536  */
1537 static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
1538                                uint64_t start, uint64_t end, uint64_t flags,
1539                                unsigned int *frag, uint64_t *frag_end)
1540 {
1541         /**
1542          * The MC L1 TLB supports variable sized pages, based on a fragment
1543          * field in the PTE. When this field is set to a non-zero value, page
1544          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1545          * flags are considered valid for all PTEs within the fragment range
1546          * and corresponding mappings are assumed to be physically contiguous.
1547          *
1548          * The L1 TLB can store a single PTE for the whole fragment,
1549          * significantly increasing the space available for translation
1550          * caching. This leads to large improvements in throughput when the
1551          * TLB is under pressure.
1552          *
1553          * The L2 TLB distributes small and large fragments into two
1554          * asymmetric partitions. The large fragment cache is significantly
1555          * larger. Thus, we try to use large fragments wherever possible.
1556          * Userspace can support this by aligning virtual base address and
1557          * allocation size to the fragment size.
1558          *
1559          * Starting with Vega10 the fragment size only controls the L1. The L2
1560          * is now directly feed with small/huge/giant pages from the walker.
1561          */
1562         unsigned max_frag;
1563
1564         if (params->adev->asic_type < CHIP_VEGA10)
1565                 max_frag = params->adev->vm_manager.fragment_size;
1566         else
1567                 max_frag = 31;
1568
1569         /* system pages are non continuously */
1570         if (params->src) {
1571                 *frag = 0;
1572                 *frag_end = end;
1573                 return;
1574         }
1575
1576         /* This intentionally wraps around if no bit is set */
1577         *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1578         if (*frag >= max_frag) {
1579                 *frag = max_frag;
1580                 *frag_end = end & ~((1ULL << max_frag) - 1);
1581         } else {
1582                 *frag_end = start + (1 << *frag);
1583         }
1584 }
1585
1586 /**
1587  * amdgpu_vm_update_ptes - make sure that page tables are valid
1588  *
1589  * @params: see amdgpu_pte_update_params definition
1590  * @start: start of GPU address range
1591  * @end: end of GPU address range
1592  * @dst: destination address to map to, the next dst inside the function
1593  * @flags: mapping flags
1594  *
1595  * Update the page tables in the range @start - @end.
1596  *
1597  * Returns:
1598  * 0 for success, -EINVAL for failure.
1599  */
1600 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1601                                  uint64_t start, uint64_t end,
1602                                  uint64_t dst, uint64_t flags)
1603 {
1604         struct amdgpu_device *adev = params->adev;
1605         struct amdgpu_vm_pt_cursor cursor;
1606         uint64_t frag_start = start, frag_end;
1607         unsigned int frag;
1608
1609         /* figure out the initial fragment */
1610         amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1611
1612         /* walk over the address space and update the PTs */
1613         amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1614         while (cursor.pfn < end) {
1615                 struct amdgpu_bo *pt = cursor.entry->base.bo;
1616                 unsigned shift, parent_shift, mask;
1617                 uint64_t incr, entry_end, pe_start;
1618
1619                 if (!pt)
1620                         return -ENOENT;
1621
1622                 /* The root level can't be a huge page */
1623                 if (cursor.level == adev->vm_manager.root_level) {
1624                         if (!amdgpu_vm_pt_descendant(adev, &cursor))
1625                                 return -ENOENT;
1626                         continue;
1627                 }
1628
1629                 /* If it isn't already handled it can't be a huge page */
1630                 if (cursor.entry->huge) {
1631                         /* Add the entry to the relocated list to update it. */
1632                         cursor.entry->huge = false;
1633                         amdgpu_vm_bo_relocated(&cursor.entry->base);
1634                 }
1635
1636                 shift = amdgpu_vm_level_shift(adev, cursor.level);
1637                 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1638                 if (adev->asic_type < CHIP_VEGA10) {
1639                         /* No huge page support before GMC v9 */
1640                         if (cursor.level != AMDGPU_VM_PTB) {
1641                                 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1642                                         return -ENOENT;
1643                                 continue;
1644                         }
1645                 } else if (frag < shift) {
1646                         /* We can't use this level when the fragment size is
1647                          * smaller than the address shift. Go to the next
1648                          * child entry and try again.
1649                          */
1650                         if (!amdgpu_vm_pt_descendant(adev, &cursor))
1651                                 return -ENOENT;
1652                         continue;
1653                 } else if (frag >= parent_shift &&
1654                            cursor.level - 1 != adev->vm_manager.root_level) {
1655                         /* If the fragment size is even larger than the parent
1656                          * shift we should go up one level and check it again
1657                          * unless one level up is the root level.
1658                          */
1659                         if (!amdgpu_vm_pt_ancestor(&cursor))
1660                                 return -ENOENT;
1661                         continue;
1662                 }
1663
1664                 /* Looks good so far, calculate parameters for the update */
1665                 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1666                 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1667                 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1668                 entry_end = (uint64_t)(mask + 1) << shift;
1669                 entry_end += cursor.pfn & ~(entry_end - 1);
1670                 entry_end = min(entry_end, end);
1671
1672                 do {
1673                         uint64_t upd_end = min(entry_end, frag_end);
1674                         unsigned nptes = (upd_end - frag_start) >> shift;
1675
1676                         amdgpu_vm_update_huge(params, pt, cursor.level,
1677                                               pe_start, dst, nptes, incr,
1678                                               flags | AMDGPU_PTE_FRAG(frag));
1679
1680                         pe_start += nptes * 8;
1681                         dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1682
1683                         frag_start = upd_end;
1684                         if (frag_start >= frag_end) {
1685                                 /* figure out the next fragment */
1686                                 amdgpu_vm_fragment(params, frag_start, end,
1687                                                    flags, &frag, &frag_end);
1688                                 if (frag < shift)
1689                                         break;
1690                         }
1691                 } while (frag_start < entry_end);
1692
1693                 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1694                         /* Mark all child entries as huge */
1695                         while (cursor.pfn < frag_start) {
1696                                 cursor.entry->huge = true;
1697                                 amdgpu_vm_pt_next(adev, &cursor);
1698                         }
1699
1700                 } else if (frag >= shift) {
1701                         /* or just move on to the next on the same level. */
1702                         amdgpu_vm_pt_next(adev, &cursor);
1703                 }
1704         }
1705
1706         return 0;
1707 }
1708
1709 /**
1710  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1711  *
1712  * @adev: amdgpu_device pointer
1713  * @exclusive: fence we need to sync to
1714  * @pages_addr: DMA addresses to use for mapping
1715  * @vm: requested vm
1716  * @start: start of mapped range
1717  * @last: last mapped entry
1718  * @flags: flags for the entries
1719  * @addr: addr to set the area to
1720  * @fence: optional resulting fence
1721  *
1722  * Fill in the page table entries between @start and @last.
1723  *
1724  * Returns:
1725  * 0 for success, -EINVAL for failure.
1726  */
1727 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1728                                        struct dma_fence *exclusive,
1729                                        dma_addr_t *pages_addr,
1730                                        struct amdgpu_vm *vm,
1731                                        uint64_t start, uint64_t last,
1732                                        uint64_t flags, uint64_t addr,
1733                                        struct dma_fence **fence)
1734 {
1735         struct amdgpu_ring *ring;
1736         void *owner = AMDGPU_FENCE_OWNER_VM;
1737         unsigned nptes, ncmds, ndw;
1738         struct amdgpu_job *job;
1739         struct amdgpu_pte_update_params params;
1740         struct dma_fence *f = NULL;
1741         int r;
1742
1743         memset(&params, 0, sizeof(params));
1744         params.adev = adev;
1745         params.vm = vm;
1746
1747         /* sync to everything on unmapping */
1748         if (!(flags & AMDGPU_PTE_VALID))
1749                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1750
1751         if (vm->use_cpu_for_update) {
1752                 /* params.src is used as flag to indicate system Memory */
1753                 if (pages_addr)
1754                         params.src = ~0;
1755
1756                 /* Wait for PT BOs to be free. PTs share the same resv. object
1757                  * as the root PD BO
1758                  */
1759                 r = amdgpu_vm_wait_pd(adev, vm, owner);
1760                 if (unlikely(r))
1761                         return r;
1762
1763                 params.func = amdgpu_vm_cpu_set_ptes;
1764                 params.pages_addr = pages_addr;
1765                 return amdgpu_vm_update_ptes(&params, start, last + 1,
1766                                              addr, flags);
1767         }
1768
1769         ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1770
1771         nptes = last - start + 1;
1772
1773         /*
1774          * reserve space for two commands every (1 << BLOCK_SIZE)
1775          *  entries or 2k dwords (whatever is smaller)
1776          *
1777          * The second command is for the shadow pagetables.
1778          */
1779         if (vm->root.base.bo->shadow)
1780                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1781         else
1782                 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1783
1784         /* padding, etc. */
1785         ndw = 64;
1786
1787         if (pages_addr) {
1788                 /* copy commands needed */
1789                 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1790
1791                 /* and also PTEs */
1792                 ndw += nptes * 2;
1793
1794                 params.func = amdgpu_vm_do_copy_ptes;
1795
1796         } else {
1797                 /* set page commands needed */
1798                 ndw += ncmds * 10;
1799
1800                 /* extra commands for begin/end fragments */
1801                 if (vm->root.base.bo->shadow)
1802                         ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1803                 else
1804                         ndw += 2 * 10 * adev->vm_manager.fragment_size;
1805
1806                 params.func = amdgpu_vm_do_set_ptes;
1807         }
1808
1809         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1810         if (r)
1811                 return r;
1812
1813         params.ib = &job->ibs[0];
1814
1815         if (pages_addr) {
1816                 uint64_t *pte;
1817                 unsigned i;
1818
1819                 /* Put the PTEs at the end of the IB. */
1820                 i = ndw - nptes * 2;
1821                 pte= (uint64_t *)&(job->ibs->ptr[i]);
1822                 params.src = job->ibs->gpu_addr + i * 4;
1823
1824                 for (i = 0; i < nptes; ++i) {
1825                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1826                                                     AMDGPU_GPU_PAGE_SIZE);
1827                         pte[i] |= flags;
1828                 }
1829                 addr = 0;
1830         }
1831
1832         r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1833         if (r)
1834                 goto error_free;
1835
1836         r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1837                              owner, false);
1838         if (r)
1839                 goto error_free;
1840
1841         r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1842         if (r)
1843                 goto error_free;
1844
1845         amdgpu_ring_pad_ib(ring, params.ib);
1846         WARN_ON(params.ib->length_dw > ndw);
1847         r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1848         if (r)
1849                 goto error_free;
1850
1851         amdgpu_bo_fence(vm->root.base.bo, f, true);
1852         dma_fence_put(*fence);
1853         *fence = f;
1854         return 0;
1855
1856 error_free:
1857         amdgpu_job_free(job);
1858         return r;
1859 }
1860
1861 /**
1862  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1863  *
1864  * @adev: amdgpu_device pointer
1865  * @exclusive: fence we need to sync to
1866  * @pages_addr: DMA addresses to use for mapping
1867  * @vm: requested vm
1868  * @mapping: mapped range and flags to use for the update
1869  * @flags: HW flags for the mapping
1870  * @nodes: array of drm_mm_nodes with the MC addresses
1871  * @fence: optional resulting fence
1872  *
1873  * Split the mapping into smaller chunks so that each update fits
1874  * into a SDMA IB.
1875  *
1876  * Returns:
1877  * 0 for success, -EINVAL for failure.
1878  */
1879 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1880                                       struct dma_fence *exclusive,
1881                                       dma_addr_t *pages_addr,
1882                                       struct amdgpu_vm *vm,
1883                                       struct amdgpu_bo_va_mapping *mapping,
1884                                       uint64_t flags,
1885                                       struct drm_mm_node *nodes,
1886                                       struct dma_fence **fence)
1887 {
1888         unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1889         uint64_t pfn, start = mapping->start;
1890         int r;
1891
1892         /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
1893          * but in case of something, we filter the flags in first place
1894          */
1895         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1896                 flags &= ~AMDGPU_PTE_READABLE;
1897         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1898                 flags &= ~AMDGPU_PTE_WRITEABLE;
1899
1900         flags &= ~AMDGPU_PTE_EXECUTABLE;
1901         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1902
1903         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1904         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1905
1906         if ((mapping->flags & AMDGPU_PTE_PRT) &&
1907             (adev->asic_type >= CHIP_VEGA10)) {
1908                 flags |= AMDGPU_PTE_PRT;
1909                 flags &= ~AMDGPU_PTE_VALID;
1910         }
1911
1912         trace_amdgpu_vm_bo_update(mapping);
1913
1914         pfn = mapping->offset >> PAGE_SHIFT;
1915         if (nodes) {
1916                 while (pfn >= nodes->size) {
1917                         pfn -= nodes->size;
1918                         ++nodes;
1919                 }
1920         }
1921
1922         do {
1923                 dma_addr_t *dma_addr = NULL;
1924                 uint64_t max_entries;
1925                 uint64_t addr, last;
1926
1927                 if (nodes) {
1928                         addr = nodes->start << PAGE_SHIFT;
1929                         max_entries = (nodes->size - pfn) *
1930                                 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1931                 } else {
1932                         addr = 0;
1933                         max_entries = S64_MAX;
1934                 }
1935
1936                 if (pages_addr) {
1937                         uint64_t count;
1938
1939                         max_entries = min(max_entries, 16ull * 1024ull);
1940                         for (count = 1;
1941                              count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1942                              ++count) {
1943                                 uint64_t idx = pfn + count;
1944
1945                                 if (pages_addr[idx] !=
1946                                     (pages_addr[idx - 1] + PAGE_SIZE))
1947                                         break;
1948                         }
1949
1950                         if (count < min_linear_pages) {
1951                                 addr = pfn << PAGE_SHIFT;
1952                                 dma_addr = pages_addr;
1953                         } else {
1954                                 addr = pages_addr[pfn];
1955                                 max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1956                         }
1957
1958                 } else if (flags & AMDGPU_PTE_VALID) {
1959                         addr += adev->vm_manager.vram_base_offset;
1960                         addr += pfn << PAGE_SHIFT;
1961                 }
1962
1963                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1964                 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1965                                                 start, last, flags, addr,
1966                                                 fence);
1967                 if (r)
1968                         return r;
1969
1970                 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1971                 if (nodes && nodes->size == pfn) {
1972                         pfn = 0;
1973                         ++nodes;
1974                 }
1975                 start = last + 1;
1976
1977         } while (unlikely(start != mapping->last + 1));
1978
1979         return 0;
1980 }
1981
1982 /**
1983  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1984  *
1985  * @adev: amdgpu_device pointer
1986  * @bo_va: requested BO and VM object
1987  * @clear: if true clear the entries
1988  *
1989  * Fill in the page table entries for @bo_va.
1990  *
1991  * Returns:
1992  * 0 for success, -EINVAL for failure.
1993  */
1994 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1995                         struct amdgpu_bo_va *bo_va,
1996                         bool clear)
1997 {
1998         struct amdgpu_bo *bo = bo_va->base.bo;
1999         struct amdgpu_vm *vm = bo_va->base.vm;
2000         struct amdgpu_bo_va_mapping *mapping;
2001         dma_addr_t *pages_addr = NULL;
2002         struct ttm_mem_reg *mem;
2003         struct drm_mm_node *nodes;
2004         struct dma_fence *exclusive, **last_update;
2005         uint64_t flags;
2006         int r;
2007
2008         if (clear || !bo) {
2009                 mem = NULL;
2010                 nodes = NULL;
2011                 exclusive = NULL;
2012         } else {
2013                 struct ttm_dma_tt *ttm;
2014
2015                 mem = &bo->tbo.mem;
2016                 nodes = mem->mm_node;
2017                 if (mem->mem_type == TTM_PL_TT) {
2018                         ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
2019                         pages_addr = ttm->dma_address;
2020                 }
2021                 exclusive = reservation_object_get_excl(bo->tbo.resv);
2022         }
2023
2024         if (bo)
2025                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
2026         else
2027                 flags = 0x0;
2028
2029         if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
2030                 last_update = &vm->last_update;
2031         else
2032                 last_update = &bo_va->last_pt_update;
2033
2034         if (!clear && bo_va->base.moved) {
2035                 bo_va->base.moved = false;
2036                 list_splice_init(&bo_va->valids, &bo_va->invalids);
2037
2038         } else if (bo_va->cleared != clear) {
2039                 list_splice_init(&bo_va->valids, &bo_va->invalids);
2040         }
2041
2042         list_for_each_entry(mapping, &bo_va->invalids, list) {
2043                 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
2044                                                mapping, flags, nodes,
2045                                                last_update);
2046                 if (r)
2047                         return r;
2048         }
2049
2050         if (vm->use_cpu_for_update) {
2051                 /* Flush HDP */
2052                 mb();
2053                 amdgpu_asic_flush_hdp(adev, NULL);
2054         }
2055
2056         /* If the BO is not in its preferred location add it back to
2057          * the evicted list so that it gets validated again on the
2058          * next command submission.
2059          */
2060         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2061                 uint32_t mem_type = bo->tbo.mem.mem_type;
2062
2063                 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
2064                         amdgpu_vm_bo_evicted(&bo_va->base);
2065                 else
2066                         amdgpu_vm_bo_idle(&bo_va->base);
2067         } else {
2068                 amdgpu_vm_bo_done(&bo_va->base);
2069         }
2070
2071         list_splice_init(&bo_va->invalids, &bo_va->valids);
2072         bo_va->cleared = clear;
2073
2074         if (trace_amdgpu_vm_bo_mapping_enabled()) {
2075                 list_for_each_entry(mapping, &bo_va->valids, list)
2076                         trace_amdgpu_vm_bo_mapping(mapping);
2077         }
2078
2079         return 0;
2080 }
2081
2082 /**
2083  * amdgpu_vm_update_prt_state - update the global PRT state
2084  *
2085  * @adev: amdgpu_device pointer
2086  */
2087 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
2088 {
2089         unsigned long flags;
2090         bool enable;
2091
2092         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
2093         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
2094         adev->gmc.gmc_funcs->set_prt(adev, enable);
2095         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
2096 }
2097
2098 /**
2099  * amdgpu_vm_prt_get - add a PRT user
2100  *
2101  * @adev: amdgpu_device pointer
2102  */
2103 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
2104 {
2105         if (!adev->gmc.gmc_funcs->set_prt)
2106                 return;
2107
2108         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
2109                 amdgpu_vm_update_prt_state(adev);
2110 }
2111
2112 /**
2113  * amdgpu_vm_prt_put - drop a PRT user
2114  *
2115  * @adev: amdgpu_device pointer
2116  */
2117 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
2118 {
2119         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
2120                 amdgpu_vm_update_prt_state(adev);
2121 }
2122
2123 /**
2124  * amdgpu_vm_prt_cb - callback for updating the PRT status
2125  *
2126  * @fence: fence for the callback
2127  * @_cb: the callback function
2128  */
2129 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
2130 {
2131         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
2132
2133         amdgpu_vm_prt_put(cb->adev);
2134         kfree(cb);
2135 }
2136
2137 /**
2138  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
2139  *
2140  * @adev: amdgpu_device pointer
2141  * @fence: fence for the callback
2142  */
2143 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
2144                                  struct dma_fence *fence)
2145 {
2146         struct amdgpu_prt_cb *cb;
2147
2148         if (!adev->gmc.gmc_funcs->set_prt)
2149                 return;
2150
2151         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
2152         if (!cb) {
2153                 /* Last resort when we are OOM */
2154                 if (fence)
2155                         dma_fence_wait(fence, false);
2156
2157                 amdgpu_vm_prt_put(adev);
2158         } else {
2159                 cb->adev = adev;
2160                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
2161                                                      amdgpu_vm_prt_cb))
2162                         amdgpu_vm_prt_cb(fence, &cb->cb);
2163         }
2164 }
2165
2166 /**
2167  * amdgpu_vm_free_mapping - free a mapping
2168  *
2169  * @adev: amdgpu_device pointer
2170  * @vm: requested vm
2171  * @mapping: mapping to be freed
2172  * @fence: fence of the unmap operation
2173  *
2174  * Free a mapping and make sure we decrease the PRT usage count if applicable.
2175  */
2176 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
2177                                    struct amdgpu_vm *vm,
2178                                    struct amdgpu_bo_va_mapping *mapping,
2179                                    struct dma_fence *fence)
2180 {
2181         if (mapping->flags & AMDGPU_PTE_PRT)
2182                 amdgpu_vm_add_prt_cb(adev, fence);
2183         kfree(mapping);
2184 }
2185
2186 /**
2187  * amdgpu_vm_prt_fini - finish all prt mappings
2188  *
2189  * @adev: amdgpu_device pointer
2190  * @vm: requested vm
2191  *
2192  * Register a cleanup callback to disable PRT support after VM dies.
2193  */
2194 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2195 {
2196         struct reservation_object *resv = vm->root.base.bo->tbo.resv;
2197         struct dma_fence *excl, **shared;
2198         unsigned i, shared_count;
2199         int r;
2200
2201         r = reservation_object_get_fences_rcu(resv, &excl,
2202                                               &shared_count, &shared);
2203         if (r) {
2204                 /* Not enough memory to grab the fence list, as last resort
2205                  * block for all the fences to complete.
2206                  */
2207                 reservation_object_wait_timeout_rcu(resv, true, false,
2208                                                     MAX_SCHEDULE_TIMEOUT);
2209                 return;
2210         }
2211
2212         /* Add a callback for each fence in the reservation object */
2213         amdgpu_vm_prt_get(adev);
2214         amdgpu_vm_add_prt_cb(adev, excl);
2215
2216         for (i = 0; i < shared_count; ++i) {
2217                 amdgpu_vm_prt_get(adev);
2218                 amdgpu_vm_add_prt_cb(adev, shared[i]);
2219         }
2220
2221         kfree(shared);
2222 }
2223
2224 /**
2225  * amdgpu_vm_clear_freed - clear freed BOs in the PT
2226  *
2227  * @adev: amdgpu_device pointer
2228  * @vm: requested vm
2229  * @fence: optional resulting fence (unchanged if no work needed to be done
2230  * or if an error occurred)
2231  *
2232  * Make sure all freed BOs are cleared in the PT.
2233  * PTs have to be reserved and mutex must be locked!
2234  *
2235  * Returns:
2236  * 0 for success.
2237  *
2238  */
2239 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2240                           struct amdgpu_vm *vm,
2241                           struct dma_fence **fence)
2242 {
2243         struct amdgpu_bo_va_mapping *mapping;
2244         uint64_t init_pte_value = 0;
2245         struct dma_fence *f = NULL;
2246         int r;
2247
2248         while (!list_empty(&vm->freed)) {
2249                 mapping = list_first_entry(&vm->freed,
2250                         struct amdgpu_bo_va_mapping, list);
2251                 list_del(&mapping->list);
2252
2253                 if (vm->pte_support_ats &&
2254                     mapping->start < AMDGPU_GMC_HOLE_START)
2255                         init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2256
2257                 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
2258                                                 mapping->start, mapping->last,
2259                                                 init_pte_value, 0, &f);
2260                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2261                 if (r) {
2262                         dma_fence_put(f);
2263                         return r;
2264                 }
2265         }
2266
2267         if (fence && f) {
2268                 dma_fence_put(*fence);
2269                 *fence = f;
2270         } else {
2271                 dma_fence_put(f);
2272         }
2273
2274         return 0;
2275
2276 }
2277
2278 /**
2279  * amdgpu_vm_handle_moved - handle moved BOs in the PT
2280  *
2281  * @adev: amdgpu_device pointer
2282  * @vm: requested vm
2283  *
2284  * Make sure all BOs which are moved are updated in the PTs.
2285  *
2286  * Returns:
2287  * 0 for success.
2288  *
2289  * PTs have to be reserved!
2290  */
2291 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2292                            struct amdgpu_vm *vm)
2293 {
2294         struct amdgpu_bo_va *bo_va, *tmp;
2295         struct reservation_object *resv;
2296         bool clear;
2297         int r;
2298
2299         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2300                 /* Per VM BOs never need to bo cleared in the page tables */
2301                 r = amdgpu_vm_bo_update(adev, bo_va, false);
2302                 if (r)
2303                         return r;
2304         }
2305
2306         spin_lock(&vm->invalidated_lock);
2307         while (!list_empty(&vm->invalidated)) {
2308                 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2309                                          base.vm_status);
2310                 resv = bo_va->base.bo->tbo.resv;
2311                 spin_unlock(&vm->invalidated_lock);
2312
2313                 /* Try to reserve the BO to avoid clearing its ptes */
2314                 if (!amdgpu_vm_debug && reservation_object_trylock(resv))
2315                         clear = false;
2316                 /* Somebody else is using the BO right now */
2317                 else
2318                         clear = true;
2319
2320                 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2321                 if (r)
2322                         return r;
2323
2324                 if (!clear)
2325                         reservation_object_unlock(resv);
2326                 spin_lock(&vm->invalidated_lock);
2327         }
2328         spin_unlock(&vm->invalidated_lock);
2329
2330         return 0;
2331 }
2332
2333 /**
2334  * amdgpu_vm_bo_add - add a bo to a specific vm
2335  *
2336  * @adev: amdgpu_device pointer
2337  * @vm: requested vm
2338  * @bo: amdgpu buffer object
2339  *
2340  * Add @bo into the requested vm.
2341  * Add @bo to the list of bos associated with the vm
2342  *
2343  * Returns:
2344  * Newly added bo_va or NULL for failure
2345  *
2346  * Object has to be reserved!
2347  */
2348 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2349                                       struct amdgpu_vm *vm,
2350                                       struct amdgpu_bo *bo)
2351 {
2352         struct amdgpu_bo_va *bo_va;
2353
2354         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2355         if (bo_va == NULL) {
2356                 return NULL;
2357         }
2358         amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2359
2360         bo_va->ref_count = 1;
2361         INIT_LIST_HEAD(&bo_va->valids);
2362         INIT_LIST_HEAD(&bo_va->invalids);
2363
2364         return bo_va;
2365 }
2366
2367
2368 /**
2369  * amdgpu_vm_bo_insert_mapping - insert a new mapping
2370  *
2371  * @adev: amdgpu_device pointer
2372  * @bo_va: bo_va to store the address
2373  * @mapping: the mapping to insert
2374  *
2375  * Insert a new mapping into all structures.
2376  */
2377 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2378                                     struct amdgpu_bo_va *bo_va,
2379                                     struct amdgpu_bo_va_mapping *mapping)
2380 {
2381         struct amdgpu_vm *vm = bo_va->base.vm;
2382         struct amdgpu_bo *bo = bo_va->base.bo;
2383
2384         mapping->bo_va = bo_va;
2385         list_add(&mapping->list, &bo_va->invalids);
2386         amdgpu_vm_it_insert(mapping, &vm->va);
2387
2388         if (mapping->flags & AMDGPU_PTE_PRT)
2389                 amdgpu_vm_prt_get(adev);
2390
2391         if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2392             !bo_va->base.moved) {
2393                 list_move(&bo_va->base.vm_status, &vm->moved);
2394         }
2395         trace_amdgpu_vm_bo_map(bo_va, mapping);
2396 }
2397
2398 /**
2399  * amdgpu_vm_bo_map - map bo inside a vm
2400  *
2401  * @adev: amdgpu_device pointer
2402  * @bo_va: bo_va to store the address
2403  * @saddr: where to map the BO
2404  * @offset: requested offset in the BO
2405  * @size: BO size in bytes
2406  * @flags: attributes of pages (read/write/valid/etc.)
2407  *
2408  * Add a mapping of the BO at the specefied addr into the VM.
2409  *
2410  * Returns:
2411  * 0 for success, error for failure.
2412  *
2413  * Object has to be reserved and unreserved outside!
2414  */
2415 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2416                      struct amdgpu_bo_va *bo_va,
2417                      uint64_t saddr, uint64_t offset,
2418                      uint64_t size, uint64_t flags)
2419 {
2420         struct amdgpu_bo_va_mapping *mapping, *tmp;
2421         struct amdgpu_bo *bo = bo_va->base.bo;
2422         struct amdgpu_vm *vm = bo_va->base.vm;
2423         uint64_t eaddr;
2424
2425         /* validate the parameters */
2426         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2427             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2428                 return -EINVAL;
2429
2430         /* make sure object fit at this offset */
2431         eaddr = saddr + size - 1;
2432         if (saddr >= eaddr ||
2433             (bo && offset + size > amdgpu_bo_size(bo)))
2434                 return -EINVAL;
2435
2436         saddr /= AMDGPU_GPU_PAGE_SIZE;
2437         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2438
2439         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2440         if (tmp) {
2441                 /* bo and tmp overlap, invalid addr */
2442                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2443                         "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2444                         tmp->start, tmp->last + 1);
2445                 return -EINVAL;
2446         }
2447
2448         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2449         if (!mapping)
2450                 return -ENOMEM;
2451
2452         mapping->start = saddr;
2453         mapping->last = eaddr;
2454         mapping->offset = offset;
2455         mapping->flags = flags;
2456
2457         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2458
2459         return 0;
2460 }
2461
2462 /**
2463  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2464  *
2465  * @adev: amdgpu_device pointer
2466  * @bo_va: bo_va to store the address
2467  * @saddr: where to map the BO
2468  * @offset: requested offset in the BO
2469  * @size: BO size in bytes
2470  * @flags: attributes of pages (read/write/valid/etc.)
2471  *
2472  * Add a mapping of the BO at the specefied addr into the VM. Replace existing
2473  * mappings as we do so.
2474  *
2475  * Returns:
2476  * 0 for success, error for failure.
2477  *
2478  * Object has to be reserved and unreserved outside!
2479  */
2480 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2481                              struct amdgpu_bo_va *bo_va,
2482                              uint64_t saddr, uint64_t offset,
2483                              uint64_t size, uint64_t flags)
2484 {
2485         struct amdgpu_bo_va_mapping *mapping;
2486         struct amdgpu_bo *bo = bo_va->base.bo;
2487         uint64_t eaddr;
2488         int r;
2489
2490         /* validate the parameters */
2491         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2492             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2493                 return -EINVAL;
2494
2495         /* make sure object fit at this offset */
2496         eaddr = saddr + size - 1;
2497         if (saddr >= eaddr ||
2498             (bo && offset + size > amdgpu_bo_size(bo)))
2499                 return -EINVAL;
2500
2501         /* Allocate all the needed memory */
2502         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2503         if (!mapping)
2504                 return -ENOMEM;
2505
2506         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2507         if (r) {
2508                 kfree(mapping);
2509                 return r;
2510         }
2511
2512         saddr /= AMDGPU_GPU_PAGE_SIZE;
2513         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2514
2515         mapping->start = saddr;
2516         mapping->last = eaddr;
2517         mapping->offset = offset;
2518         mapping->flags = flags;
2519
2520         amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2521
2522         return 0;
2523 }
2524
2525 /**
2526  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2527  *
2528  * @adev: amdgpu_device pointer
2529  * @bo_va: bo_va to remove the address from
2530  * @saddr: where to the BO is mapped
2531  *
2532  * Remove a mapping of the BO at the specefied addr from the VM.
2533  *
2534  * Returns:
2535  * 0 for success, error for failure.
2536  *
2537  * Object has to be reserved and unreserved outside!
2538  */
2539 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2540                        struct amdgpu_bo_va *bo_va,
2541                        uint64_t saddr)
2542 {
2543         struct amdgpu_bo_va_mapping *mapping;
2544         struct amdgpu_vm *vm = bo_va->base.vm;
2545         bool valid = true;
2546
2547         saddr /= AMDGPU_GPU_PAGE_SIZE;
2548
2549         list_for_each_entry(mapping, &bo_va->valids, list) {
2550                 if (mapping->start == saddr)
2551                         break;
2552         }
2553
2554         if (&mapping->list == &bo_va->valids) {
2555                 valid = false;
2556
2557                 list_for_each_entry(mapping, &bo_va->invalids, list) {
2558                         if (mapping->start == saddr)
2559                                 break;
2560                 }
2561
2562                 if (&mapping->list == &bo_va->invalids)
2563                         return -ENOENT;
2564         }
2565
2566         list_del(&mapping->list);
2567         amdgpu_vm_it_remove(mapping, &vm->va);
2568         mapping->bo_va = NULL;
2569         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2570
2571         if (valid)
2572                 list_add(&mapping->list, &vm->freed);
2573         else
2574                 amdgpu_vm_free_mapping(adev, vm, mapping,
2575                                        bo_va->last_pt_update);
2576
2577         return 0;
2578 }
2579
2580 /**
2581  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2582  *
2583  * @adev: amdgpu_device pointer
2584  * @vm: VM structure to use
2585  * @saddr: start of the range
2586  * @size: size of the range
2587  *
2588  * Remove all mappings in a range, split them as appropriate.
2589  *
2590  * Returns:
2591  * 0 for success, error for failure.
2592  */
2593 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2594                                 struct amdgpu_vm *vm,
2595                                 uint64_t saddr, uint64_t size)
2596 {
2597         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2598         LIST_HEAD(removed);
2599         uint64_t eaddr;
2600
2601         eaddr = saddr + size - 1;
2602         saddr /= AMDGPU_GPU_PAGE_SIZE;
2603         eaddr /= AMDGPU_GPU_PAGE_SIZE;
2604
2605         /* Allocate all the needed memory */
2606         before = kzalloc(sizeof(*before), GFP_KERNEL);
2607         if (!before)
2608                 return -ENOMEM;
2609         INIT_LIST_HEAD(&before->list);
2610
2611         after = kzalloc(sizeof(*after), GFP_KERNEL);
2612         if (!after) {
2613                 kfree(before);
2614                 return -ENOMEM;
2615         }
2616         INIT_LIST_HEAD(&after->list);
2617
2618         /* Now gather all removed mappings */
2619         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2620         while (tmp) {
2621                 /* Remember mapping split at the start */
2622                 if (tmp->start < saddr) {
2623                         before->start = tmp->start;
2624                         before->last = saddr - 1;
2625                         before->offset = tmp->offset;
2626                         before->flags = tmp->flags;
2627                         before->bo_va = tmp->bo_va;
2628                         list_add(&before->list, &tmp->bo_va->invalids);
2629                 }
2630
2631                 /* Remember mapping split at the end */
2632                 if (tmp->last > eaddr) {
2633                         after->start = eaddr + 1;
2634                         after->last = tmp->last;
2635                         after->offset = tmp->offset;
2636                         after->offset += after->start - tmp->start;
2637                         after->flags = tmp->flags;
2638                         after->bo_va = tmp->bo_va;
2639                         list_add(&after->list, &tmp->bo_va->invalids);
2640                 }
2641
2642                 list_del(&tmp->list);
2643                 list_add(&tmp->list, &removed);
2644
2645                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2646         }
2647
2648         /* And free them up */
2649         list_for_each_entry_safe(tmp, next, &removed, list) {
2650                 amdgpu_vm_it_remove(tmp, &vm->va);
2651                 list_del(&tmp->list);
2652
2653                 if (tmp->start < saddr)
2654                     tmp->start = saddr;
2655                 if (tmp->last > eaddr)
2656                     tmp->last = eaddr;
2657
2658                 tmp->bo_va = NULL;
2659                 list_add(&tmp->list, &vm->freed);
2660                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2661         }
2662
2663         /* Insert partial mapping before the range */
2664         if (!list_empty(&before->list)) {
2665                 amdgpu_vm_it_insert(before, &vm->va);
2666                 if (before->flags & AMDGPU_PTE_PRT)
2667                         amdgpu_vm_prt_get(adev);
2668         } else {
2669                 kfree(before);
2670         }
2671
2672         /* Insert partial mapping after the range */
2673         if (!list_empty(&after->list)) {
2674                 amdgpu_vm_it_insert(after, &vm->va);
2675                 if (after->flags & AMDGPU_PTE_PRT)
2676                         amdgpu_vm_prt_get(adev);
2677         } else {
2678                 kfree(after);
2679         }
2680
2681         return 0;
2682 }
2683
2684 /**
2685  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2686  *
2687  * @vm: the requested VM
2688  * @addr: the address
2689  *
2690  * Find a mapping by it's address.
2691  *
2692  * Returns:
2693  * The amdgpu_bo_va_mapping matching for addr or NULL
2694  *
2695  */
2696 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2697                                                          uint64_t addr)
2698 {
2699         return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2700 }
2701
2702 /**
2703  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2704  *
2705  * @vm: the requested vm
2706  * @ticket: CS ticket
2707  *
2708  * Trace all mappings of BOs reserved during a command submission.
2709  */
2710 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2711 {
2712         struct amdgpu_bo_va_mapping *mapping;
2713
2714         if (!trace_amdgpu_vm_bo_cs_enabled())
2715                 return;
2716
2717         for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2718              mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2719                 if (mapping->bo_va && mapping->bo_va->base.bo) {
2720                         struct amdgpu_bo *bo;
2721
2722                         bo = mapping->bo_va->base.bo;
2723                         if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2724                                 continue;
2725                 }
2726
2727                 trace_amdgpu_vm_bo_cs(mapping);
2728         }
2729 }
2730
2731 /**
2732  * amdgpu_vm_bo_rmv - remove a bo to a specific vm
2733  *
2734  * @adev: amdgpu_device pointer
2735  * @bo_va: requested bo_va
2736  *
2737  * Remove @bo_va->bo from the requested vm.
2738  *
2739  * Object have to be reserved!
2740  */
2741 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2742                       struct amdgpu_bo_va *bo_va)
2743 {
2744         struct amdgpu_bo_va_mapping *mapping, *next;
2745         struct amdgpu_bo *bo = bo_va->base.bo;
2746         struct amdgpu_vm *vm = bo_va->base.vm;
2747         struct amdgpu_vm_bo_base **base;
2748
2749         if (bo) {
2750                 if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2751                         vm->bulk_moveable = false;
2752
2753                 for (base = &bo_va->base.bo->vm_bo; *base;
2754                      base = &(*base)->next) {
2755                         if (*base != &bo_va->base)
2756                                 continue;
2757
2758                         *base = bo_va->base.next;
2759                         break;
2760                 }
2761         }
2762
2763         spin_lock(&vm->invalidated_lock);
2764         list_del(&bo_va->base.vm_status);
2765         spin_unlock(&vm->invalidated_lock);
2766
2767         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2768                 list_del(&mapping->list);
2769                 amdgpu_vm_it_remove(mapping, &vm->va);
2770                 mapping->bo_va = NULL;
2771                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2772                 list_add(&mapping->list, &vm->freed);
2773         }
2774         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2775                 list_del(&mapping->list);
2776                 amdgpu_vm_it_remove(mapping, &vm->va);
2777                 amdgpu_vm_free_mapping(adev, vm, mapping,
2778                                        bo_va->last_pt_update);
2779         }
2780
2781         dma_fence_put(bo_va->last_pt_update);
2782         kfree(bo_va);
2783 }
2784
2785 /**
2786  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2787  *
2788  * @adev: amdgpu_device pointer
2789  * @bo: amdgpu buffer object
2790  * @evicted: is the BO evicted
2791  *
2792  * Mark @bo as invalid.
2793  */
2794 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2795                              struct amdgpu_bo *bo, bool evicted)
2796 {
2797         struct amdgpu_vm_bo_base *bo_base;
2798
2799         /* shadow bo doesn't have bo base, its validation needs its parent */
2800         if (bo->parent && bo->parent->shadow == bo)
2801                 bo = bo->parent;
2802
2803         for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2804                 struct amdgpu_vm *vm = bo_base->vm;
2805
2806                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2807                         amdgpu_vm_bo_evicted(bo_base);
2808                         continue;
2809                 }
2810
2811                 if (bo_base->moved)
2812                         continue;
2813                 bo_base->moved = true;
2814
2815                 if (bo->tbo.type == ttm_bo_type_kernel)
2816                         amdgpu_vm_bo_relocated(bo_base);
2817                 else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2818                         amdgpu_vm_bo_moved(bo_base);
2819                 else
2820                         amdgpu_vm_bo_invalidated(bo_base);
2821         }
2822 }
2823
2824 /**
2825  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2826  *
2827  * @vm_size: VM size
2828  *
2829  * Returns:
2830  * VM page table as power of two
2831  */
2832 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2833 {
2834         /* Total bits covered by PD + PTs */
2835         unsigned bits = ilog2(vm_size) + 18;
2836
2837         /* Make sure the PD is 4K in size up to 8GB address space.
2838            Above that split equal between PD and PTs */
2839         if (vm_size <= 8)
2840                 return (bits - 9);
2841         else
2842                 return ((bits + 3) / 2);
2843 }
2844
2845 /**
2846  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2847  *
2848  * @adev: amdgpu_device pointer
2849  * @min_vm_size: the minimum vm size in GB if it's set auto
2850  * @fragment_size_default: Default PTE fragment size
2851  * @max_level: max VMPT level
2852  * @max_bits: max address space size in bits
2853  *
2854  */
2855 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2856                            uint32_t fragment_size_default, unsigned max_level,
2857                            unsigned max_bits)
2858 {
2859         unsigned int max_size = 1 << (max_bits - 30);
2860         unsigned int vm_size;
2861         uint64_t tmp;
2862
2863         /* adjust vm size first */
2864         if (amdgpu_vm_size != -1) {
2865                 vm_size = amdgpu_vm_size;
2866                 if (vm_size > max_size) {
2867                         dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2868                                  amdgpu_vm_size, max_size);
2869                         vm_size = max_size;
2870                 }
2871         } else {
2872                 struct sysinfo si;
2873                 unsigned int phys_ram_gb;
2874
2875                 /* Optimal VM size depends on the amount of physical
2876                  * RAM available. Underlying requirements and
2877                  * assumptions:
2878                  *
2879                  *  - Need to map system memory and VRAM from all GPUs
2880                  *     - VRAM from other GPUs not known here
2881                  *     - Assume VRAM <= system memory
2882                  *  - On GFX8 and older, VM space can be segmented for
2883                  *    different MTYPEs
2884                  *  - Need to allow room for fragmentation, guard pages etc.
2885                  *
2886                  * This adds up to a rough guess of system memory x3.
2887                  * Round up to power of two to maximize the available
2888                  * VM size with the given page table size.
2889                  */
2890                 si_meminfo(&si);
2891                 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2892                                (1 << 30) - 1) >> 30;
2893                 vm_size = roundup_pow_of_two(
2894                         min(max(phys_ram_gb * 3, min_vm_size), max_size));
2895         }
2896
2897         adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2898
2899         tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2900         if (amdgpu_vm_block_size != -1)
2901                 tmp >>= amdgpu_vm_block_size - 9;
2902         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2903         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2904         switch (adev->vm_manager.num_level) {
2905         case 3:
2906                 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2907                 break;
2908         case 2:
2909                 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2910                 break;
2911         case 1:
2912                 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2913                 break;
2914         default:
2915                 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2916         }
2917         /* block size depends on vm size and hw setup*/
2918         if (amdgpu_vm_block_size != -1)
2919                 adev->vm_manager.block_size =
2920                         min((unsigned)amdgpu_vm_block_size, max_bits
2921                             - AMDGPU_GPU_PAGE_SHIFT
2922                             - 9 * adev->vm_manager.num_level);
2923         else if (adev->vm_manager.num_level > 1)
2924                 adev->vm_manager.block_size = 9;
2925         else
2926                 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2927
2928         if (amdgpu_vm_fragment_size == -1)
2929                 adev->vm_manager.fragment_size = fragment_size_default;
2930         else
2931                 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2932
2933         DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2934                  vm_size, adev->vm_manager.num_level + 1,
2935                  adev->vm_manager.block_size,
2936                  adev->vm_manager.fragment_size);
2937 }
2938
2939 static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
2940 {
2941         struct amdgpu_retryfault_hashtable *fault_hash;
2942
2943         fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
2944         if (!fault_hash)
2945                 return fault_hash;
2946
2947         INIT_CHASH_TABLE(fault_hash->hash,
2948                         AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
2949         spin_lock_init(&fault_hash->lock);
2950         fault_hash->count = 0;
2951
2952         return fault_hash;
2953 }
2954
2955 /**
2956  * amdgpu_vm_init - initialize a vm instance
2957  *
2958  * @adev: amdgpu_device pointer
2959  * @vm: requested vm
2960  * @vm_context: Indicates if it GFX or Compute context
2961  * @pasid: Process address space identifier
2962  *
2963  * Init @vm fields.
2964  *
2965  * Returns:
2966  * 0 for success, error for failure.
2967  */
2968 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2969                    int vm_context, unsigned int pasid)
2970 {
2971         struct amdgpu_bo_param bp;
2972         struct amdgpu_bo *root;
2973         int r, i;
2974
2975         vm->va = RB_ROOT_CACHED;
2976         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2977                 vm->reserved_vmid[i] = NULL;
2978         INIT_LIST_HEAD(&vm->evicted);
2979         INIT_LIST_HEAD(&vm->relocated);
2980         INIT_LIST_HEAD(&vm->moved);
2981         INIT_LIST_HEAD(&vm->idle);
2982         INIT_LIST_HEAD(&vm->invalidated);
2983         spin_lock_init(&vm->invalidated_lock);
2984         INIT_LIST_HEAD(&vm->freed);
2985
2986         /* create scheduler entity for page table updates */
2987         r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
2988                                   adev->vm_manager.vm_pte_num_rqs, NULL);
2989         if (r)
2990                 return r;
2991
2992         vm->pte_support_ats = false;
2993
2994         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2995                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2996                                                 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2997
2998                 if (adev->asic_type == CHIP_RAVEN)
2999                         vm->pte_support_ats = true;
3000         } else {
3001                 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3002                                                 AMDGPU_VM_USE_CPU_FOR_GFX);
3003         }
3004         DRM_DEBUG_DRIVER("VM update mode is %s\n",
3005                          vm->use_cpu_for_update ? "CPU" : "SDMA");
3006         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3007                   "CPU update of VM recommended only for large BAR system\n");
3008         vm->last_update = NULL;
3009
3010         amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
3011         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
3012                 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
3013         r = amdgpu_bo_create(adev, &bp, &root);
3014         if (r)
3015                 goto error_free_sched_entity;
3016
3017         r = amdgpu_bo_reserve(root, true);
3018         if (r)
3019                 goto error_free_root;
3020
3021         r = reservation_object_reserve_shared(root->tbo.resv, 1);
3022         if (r)
3023                 goto error_unreserve;
3024
3025         r = amdgpu_vm_clear_bo(adev, vm, root,
3026                                adev->vm_manager.root_level,
3027                                vm->pte_support_ats);
3028         if (r)
3029                 goto error_unreserve;
3030
3031         amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
3032         amdgpu_bo_unreserve(vm->root.base.bo);
3033
3034         if (pasid) {
3035                 unsigned long flags;
3036
3037                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3038                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3039                               GFP_ATOMIC);
3040                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3041                 if (r < 0)
3042                         goto error_free_root;
3043
3044                 vm->pasid = pasid;
3045         }
3046
3047         vm->fault_hash = init_fault_hash();
3048         if (!vm->fault_hash) {
3049                 r = -ENOMEM;
3050                 goto error_free_root;
3051         }
3052
3053         INIT_KFIFO(vm->faults);
3054
3055         return 0;
3056
3057 error_unreserve:
3058         amdgpu_bo_unreserve(vm->root.base.bo);
3059
3060 error_free_root:
3061         amdgpu_bo_unref(&vm->root.base.bo->shadow);
3062         amdgpu_bo_unref(&vm->root.base.bo);
3063         vm->root.base.bo = NULL;
3064
3065 error_free_sched_entity:
3066         drm_sched_entity_destroy(&vm->entity);
3067
3068         return r;
3069 }
3070
3071 /**
3072  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3073  *
3074  * @adev: amdgpu_device pointer
3075  * @vm: requested vm
3076  *
3077  * This only works on GFX VMs that don't have any BOs added and no
3078  * page tables allocated yet.
3079  *
3080  * Changes the following VM parameters:
3081  * - use_cpu_for_update
3082  * - pte_supports_ats
3083  * - pasid (old PASID is released, because compute manages its own PASIDs)
3084  *
3085  * Reinitializes the page directory to reflect the changed ATS
3086  * setting.
3087  *
3088  * Returns:
3089  * 0 for success, -errno for errors.
3090  */
3091 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
3092 {
3093         bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
3094         int r;
3095
3096         r = amdgpu_bo_reserve(vm->root.base.bo, true);
3097         if (r)
3098                 return r;
3099
3100         /* Sanity checks */
3101         if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
3102                 r = -EINVAL;
3103                 goto unreserve_bo;
3104         }
3105
3106         if (pasid) {
3107                 unsigned long flags;
3108
3109                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3110                 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3111                               GFP_ATOMIC);
3112                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3113
3114                 if (r == -ENOSPC)
3115                         goto unreserve_bo;
3116                 r = 0;
3117         }
3118
3119         /* Check if PD needs to be reinitialized and do it before
3120          * changing any other state, in case it fails.
3121          */
3122         if (pte_support_ats != vm->pte_support_ats) {
3123                 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
3124                                adev->vm_manager.root_level,
3125                                pte_support_ats);
3126                 if (r)
3127                         goto free_idr;
3128         }
3129
3130         /* Update VM state */
3131         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3132                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3133         vm->pte_support_ats = pte_support_ats;
3134         DRM_DEBUG_DRIVER("VM update mode is %s\n",
3135                          vm->use_cpu_for_update ? "CPU" : "SDMA");
3136         WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3137                   "CPU update of VM recommended only for large BAR system\n");
3138
3139         if (vm->pasid) {
3140                 unsigned long flags;
3141
3142                 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3143                 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3144                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3145
3146                 /* Free the original amdgpu allocated pasid
3147                  * Will be replaced with kfd allocated pasid
3148                  */
3149                 amdgpu_pasid_free(vm->pasid);
3150                 vm->pasid = 0;
3151         }
3152
3153   &n