// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}
static u64 gen8_pte_encode(dma_addr_t addr,
			   unsigned int pat_index,
			   u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	/*
	 * For pre-gen12 platforms pat_index is the same as enum
	 * i915_cache_level, so the switch-case here is still valid.
	 * See translation table defined by LEGACY_CACHELEVEL.
	 */
	switch (pat_index) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}
static u64 gen12_pte_encode(dma_addr_t addr,
			    unsigned int pat_index,
			    u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~GEN8_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	if (pat_index & BIT(0))
		pte |= GEN12_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= GEN12_PPGTT_PTE_PAT1;

	if (pat_index & BIT(2))
		pte |= GEN12_PPGTT_PTE_PAT2;

	if (pat_index & BIT(3))
		pte |= MTL_PPGTT_PTE_PAT3;

	return pte;
}
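/*
 * Illustrative sketch (not part of the driver): unlike the pre-gen12
 * encoding, the gen12 PTE carries the PAT selector as individual bits,
 * so the mapping can be inverted mechanically. The helper below recovers
 * the pat_index from a PTE; its name is invented for documentation only
 * and nothing upstream calls it.
 */
static inline unsigned int gen12_example_pte_to_pat_index(gen8_pte_t pte)
{
	unsigned int pat_index = 0;

	if (pte & GEN12_PPGTT_PTE_PAT0)
		pat_index |= BIT(0);
	if (pte & GEN12_PPGTT_PTE_PAT1)
		pat_index |= BIT(1);
	if (pte & GEN12_PPGTT_PTE_PAT2)
		pat_index |= BIT(2);
	if (pte & MTL_PPGTT_PTE_PAT3)
		pat_index |= BIT(3);

	return pat_index;
}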
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}
/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
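
/*
 * Illustrative sketch (not part of the driver): the macros above split a
 * GPU virtual address into 9-bit per-level indices starting at bit 12,
 * so recombining the indices reconstructs the 4K-aligned address for any
 * canonical 48-bit input. The helper name is invented for documentation.
 */
static inline u64 gen8_example_recombine(u64 addr)
{
	return ((u64)__gen8_pte_index(addr, 3) << __gen8_pte_shift(3)) | /* bits [47:39] */
	       ((u64)__gen8_pte_index(addr, 2) << __gen8_pte_shift(2)) | /* bits [38:30] */
	       ((u64)__gen8_pte_index(addr, 1) << __gen8_pte_shift(1)) | /* bits [29:21] */
	       ((u64)__gen8_pte_index(addr, 0) << __gen8_pte_shift(0));  /* bits [20:12] */
}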
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}
static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}
static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}
static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (ppgtt->pd)
		__gen8_ppgtt_cleanup(vm, ppgtt->pd,
				     gen8_pd_top_count(vm), vm->top);

	free_scratch(vm);
}
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			unsigned int pte = gen8_pd_index(start, 0);
			unsigned int num_ptes;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			num_ptes = count;
			if (pt->is_compact) {
				GEM_BUG_ON(num_ptes % 16);
				GEM_BUG_ON(pte % 16);
				num_ptes /= 16;
				pte /= 16;
			}

			vaddr = px_vaddr(pt);
			memset64(vaddr + pte,
				 vm->scratch[0]->encode,
				 num_ptes);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}
static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}
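
/*
 * Usage sketch (illustrative only, not upstream code): callers hand
 * vm->clear_range() byte addresses and lengths, both 4K aligned; the
 * wrapper above converts them to page-frame numbers before walking the
 * tree. The helper name here is invented for documentation.
 */
static inline void gen8_example_clear_one_page(struct i915_address_space *vm,
					       u64 offset)
{
	vm->clear_range(vm, offset, I915_GTT_PAGE_SIZE);
}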
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);

			fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}
static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}
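
/*
 * Usage sketch (illustrative only, assuming the stash helpers keep their
 * current signatures): gen8_ppgtt_alloc() itself never allocates memory,
 * so callers pre-allocate page tables into a stash, map them, and only
 * then invoke vm->allocate_va_range(), which cannot fail. The wrapper
 * name below is invented for documentation.
 */
static inline int gen8_example_alloc_range(struct i915_address_space *vm,
					   u64 start, u64 length)
{
	struct i915_vm_pt_stash stash = {};
	int err;

	err = i915_vm_alloc_pt_stash(vm, &stash, length);
	if (err)
		return err;

	err = i915_vm_map_pt_stash(vm, &stash);
	if (!err)
		vm->allocate_va_range(vm, &stash, start, length);

	i915_vm_free_pt_stash(vm, &stash);
	return err;
}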
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 u64 *start, u64 end, int lvl,
				 void (*fn)(struct i915_address_space *vm,
					    struct i915_page_table *pt,
					    void *data),
				 void *data)
{
	unsigned int idx, len;

	len = gen8_pd_range(*start, end, lvl--, &idx);

	spin_lock(&pd->lock);
	do {
		struct i915_page_table *pt = pd->entry[idx];

		atomic_inc(&pt->used);
		spin_unlock(&pd->lock);

		if (lvl) {
			__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
					     fn, data);
		} else {
			fn(vm, pt, data);
			*start += gen8_pt_count(*start, end);
		}

		spin_lock(&pd->lock);
		atomic_dec(&pt->used);
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}
static void gen8_ppgtt_foreach(struct i915_address_space *vm,
			       u64 start, u64 length,
			       void (*fn)(struct i915_address_space *vm,
					  struct i915_page_table *pt,
					  void *data),
			       void *data)
{
	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;

	__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
			     &start, start + length, vm->top,
			     fn, data);
}
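
/*
 * Usage sketch (illustrative only): a trivial callback matching the fn
 * parameter of vm->foreach() above, counting the leaf page tables that
 * back a range, e.g. vm->foreach(vm, start, length, ..., &count). Names
 * are invented for documentation; nothing upstream uses them.
 */
static void gen8_example_count_pt(struct i915_address_space *vm,
				  struct i915_page_table *pt,
				  void *data)
{
	(*(unsigned int *)data)++;
}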
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      unsigned int pat_index,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = ppgtt->vm.pte_encode(0, pat_index, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);

	return idx;
}
static void
xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
			  struct i915_vma_resource *vma_res,
			  struct sgt_dma *iter,
			  unsigned int pat_index,
			  u32 flags)
{
	const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;
	u64 end = start + vma_res->vma_size;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		struct i915_page_table *pt =
			i915_pt_entry(pd, __gen8_pte_index(start, 1));
		gen8_pte_t encode = pte_encode;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index, max, nent, i;

		max = I915_PDES;
		nent = 1;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				/*
				 * Device local-memory on these platforms should
				 * always use 64K pages or larger (including GTT
				 * alignment), therefore if we know the whole
				 * page-table needs to be filled we can always
				 * safely use the compact-layout. Otherwise fall
				 * back to the TLB hint with PS64. If this is
				 * system memory we only bother with PS64.
				 */
				if ((encode & GEN12_PPGTT_PTE_LM) &&
				    end - start >= SZ_2M && !index) {
					index = __gen8_pte_index(start, 0) / 16;
					page_size = I915_GTT_PAGE_SIZE_64K;

					max /= 16;

					vaddr = px_vaddr(pd);
					vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;

					pt->is_compact = true;
				} else if (IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
					   rem >= I915_GTT_PAGE_SIZE_64K &&
					   !(index % 16)) {
					encode |= GEN12_PTE_PS64;
					page_size = I915_GTT_PAGE_SIZE_64K;
					nent = 16;
				}
			}

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(rem < page_size);

			for (i = 0; i < nent; i++) {
				vaddr[index++] =
					encode | (iter->dma + i *
						  I915_GTT_PAGE_SIZE);
			}

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   struct sgt_dma *iter,
				   unsigned int pat_index,
				   u32 flags)
{
	const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma_res->start;

	GEM_BUG_ON(!i915_vm_is_4lvl(vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		drm_clflush_virt_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vm) &&
		      !iter->sg && IS_ALIGNED(vma_res->start +
					      vma_res->node_size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
				u16 i;

				encode = vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				drm_clflush_virt_range(vaddr, PAGE_SIZE);
			}
		}

		vma_res->page_sizes_gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma_resource *vma_res,
			      unsigned int pat_index,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma_res);

	if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
		if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
			xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
		else
			gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
	} else {
		u64 idx = vma_res->start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    pat_index, flags);
		} while (idx);

		vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
	}
}
static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    unsigned int pat_index,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(pt->is_compact);

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, pat_index, flags);
	drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
					    dma_addr_t addr,
					    u64 offset,
					    unsigned int pat_index,
					    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
	gen8_pte_t *vaddr;

	GEM_BUG_ON(!IS_ALIGNED(addr, SZ_64K));
	GEM_BUG_ON(!IS_ALIGNED(offset, SZ_64K));

	/* XXX: we don't strictly need to use this layout */

	if (!pt->is_compact) {
		vaddr = px_vaddr(pd);
		vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
		pt->is_compact = true;
	}

	vaddr = px_vaddr(pt);
	vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
				       dma_addr_t addr,
				       u64 offset,
				       unsigned int pat_index,
				       u32 flags)
{
	if (flags & PTE_LM)
		return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
						       pat_index, flags);

	return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees to not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		vm->pte_encode(px_dma(vm->scratch[0]),
			       i915_gem_get_pat_index(vm->i915,
						      I915_CACHE_NONE),
			       pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto free_scratch;
		}

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	vm->scratch[0] = NULL;
	return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}
/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB, the legacy 32b
 * address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_page_directory *pd;
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	/*
	 * Using SMEM here instead of LMEM has the advantage of not reserving
	 * high performance memory for a "never" used filler page. It also
	 * removes the device access that would be required to initialise the
	 * scratch page, reducing pressure on an even scarcer resource.
	 */
	ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (GRAPHICS_VER(gt->i915) >= 12)
		ppgtt->vm.pte_encode = gen12_pte_encode;
	else
		ppgtt->vm.pte_encode = gen8_pte_encode;

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	if (HAS_64K_PAGES(gt->i915))
		ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
	else
		ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;
	ppgtt->vm.foreach = gen8_ppgtt_foreach;
	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_put;

	pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(pd)) {
		err = PTR_ERR(pd);
		goto err_put;
	}
	ppgtt->pd = pd;

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_put;
	}

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return ppgtt;

err_put:
	i915_vm_put(&ppgtt->vm);
	return ERR_PTR(err);
}
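
/*
 * Usage sketch (illustrative only): gen8_ppgtt_create() is the factory the
 * rest of the driver uses. A caller typically works with the address space
 * embedded in the returned ppgtt and drops it with i915_vm_put() when done.
 * The wrapper name below is invented for documentation.
 */
static inline struct i915_address_space *
gen8_example_create_vm(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = gen8_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return ERR_CAST(ppgtt);

	return &ppgtt->vm; /* release with i915_vm_put() */
}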