2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
34 #include <asm/set_memory.h>
37 #include <drm/i915_drm.h>
40 #include "i915_vgpu.h"
41 #include "i915_trace.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
45 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
48 * DOC: Global GTT views
50 * Background and previous state
52 * Historically objects could exist (be bound) in global GTT space only as
53 * singular instances with a view representing all of the object's backing pages
54 * in a linear fashion. This view will be called a normal view.
56 * To support multiple views of the same object, where the number of mapped
57 * pages is not equal to the backing store, or where the layout of the pages
58 * is not linear, the concept of a GGTT view was added.
60 * One example of an alternative view is a stereo display driven by a single
61 * image. In this case we would have a framebuffer looking like this
67 * The above would represent a normal GGTT view as normally mapped for GPU or
68 * CPU rendering. In contrast, fed to the display engine would be an alternative
69 * view which could look something like this:
74 * In this example both the size and layout of pages in the alternative view are
75 * different from the normal view.
77 * Implementation and usage
79 * GGTT views are implemented using VMAs and are distinguished via enum
80 * i915_ggtt_view_type and struct i915_ggtt_view.
82 * A new flavour of core GEM functions which work with GGTT bound objects was
83 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
84 * renaming in large amounts of code. They take the struct i915_ggtt_view
85 * parameter encapsulating all metadata required to implement a view.
87 * As a helper for callers which are only interested in the normal view, a
88 * globally const i915_ggtt_view_normal singleton instance exists. All old core
89 * GEM API functions, the ones not taking the view parameter, operate on or
90 * with the normal GGTT view.
92 * Code wanting to add or use a new GGTT view needs to:
94 * 1. Add a new enum with a suitable name.
95 * 2. Extend the metadata in the i915_ggtt_view structure if required.
96 * 3. Add support to i915_get_vma_pages().
98 * New views are required to build a scatter-gather table from within the
99 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
100 * exists for the lifetime of a VMA.
102 * The core API is designed to have copy semantics, which means that the
103 * passed-in struct i915_ggtt_view does not need to be persistent (left around
104 * after calling the core API functions); see the illustrative sketch below.
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
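/*
 * Illustrative sketch only (not part of the driver flow in this file): the
 * copy semantics described in the DOC comment above mean a caller may build a
 * struct i915_ggtt_view on the stack, pass it by pointer to a view-taking
 * helper, and let it go out of scope afterwards. The field names and the
 * i915_gem_object_ggtt_pin() call below are assumptions for illustration,
 * not a definitive reference:
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial.offset = 0,	// first page of the object (assumed field)
 *		.partial.size = 1,	// one page (assumed field)
 *	};
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// 'view' has been copied into the VMA; the stack instance is done.
 */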
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
114 * Note that as an uncached mmio write, this will flush the
115 * WCB of the writes into the GGTT before it triggers the invalidate.
117 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
120 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
122 gen6_ggtt_invalidate(dev_priv);
123 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
126 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
128 intel_gtt_chipset_flush();
131 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
133 i915->ggtt.invalidate(i915);
136 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
140 bool has_full_48bit_ppgtt;
142 if (!dev_priv->info.has_aliasing_ppgtt)
145 has_full_ppgtt = dev_priv->info.has_full_ppgtt;
146 has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
148 if (intel_vgpu_active(dev_priv)) {
149 /* GVT-g has no support for 32bit ppgtt */
150 has_full_ppgtt = false;
151 has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
155 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
156 * execlists, the sole mechanism available to submit work.
158 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
161 if (enable_ppgtt == 1)
164 if (enable_ppgtt == 2 && has_full_ppgtt)
167 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
170 /* Disable ppgtt on SNB if VT-d is on. */
171 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
172 DRM_INFO("Disabling PPGTT because VT-d is on\n");
176 /* Early VLV doesn't have this */
177 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
178 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
182 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
183 if (has_full_48bit_ppgtt)
193 static int ppgtt_bind_vma(struct i915_vma *vma,
194 enum i915_cache_level cache_level,
200 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
201 err = vma->vm->allocate_va_range(vma->vm,
202 vma->node.start, vma->size);
207 /* Currently applicable only to VLV */
210 pte_flags |= PTE_READ_ONLY;
212 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
217 static void ppgtt_unbind_vma(struct i915_vma *vma)
219 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
222 static int ppgtt_set_pages(struct i915_vma *vma)
224 GEM_BUG_ON(vma->pages);
226 vma->pages = vma->obj->mm.pages;
228 vma->page_sizes = vma->obj->mm.page_sizes;
233 static void clear_pages(struct i915_vma *vma)
235 GEM_BUG_ON(!vma->pages);
237 if (vma->pages != vma->obj->mm.pages) {
238 sg_free_table(vma->pages);
243 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
246 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
247 enum i915_cache_level level)
249 gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
253 case I915_CACHE_NONE:
254 pte |= PPAT_UNCACHED;
257 pte |= PPAT_DISPLAY_ELLC;
267 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
268 const enum i915_cache_level level)
270 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
272 if (level != I915_CACHE_NONE)
273 pde |= PPAT_CACHED_PDE;
275 pde |= PPAT_UNCACHED;
279 #define gen8_pdpe_encode gen8_pde_encode
280 #define gen8_pml4e_encode gen8_pde_encode
282 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
283 enum i915_cache_level level,
286 gen6_pte_t pte = GEN6_PTE_VALID;
287 pte |= GEN6_PTE_ADDR_ENCODE(addr);
290 case I915_CACHE_L3_LLC:
292 pte |= GEN6_PTE_CACHE_LLC;
294 case I915_CACHE_NONE:
295 pte |= GEN6_PTE_UNCACHED;
304 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
305 enum i915_cache_level level,
308 gen6_pte_t pte = GEN6_PTE_VALID;
309 pte |= GEN6_PTE_ADDR_ENCODE(addr);
312 case I915_CACHE_L3_LLC:
313 pte |= GEN7_PTE_CACHE_L3_LLC;
316 pte |= GEN6_PTE_CACHE_LLC;
318 case I915_CACHE_NONE:
319 pte |= GEN6_PTE_UNCACHED;
328 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
329 enum i915_cache_level level,
332 gen6_pte_t pte = GEN6_PTE_VALID;
333 pte |= GEN6_PTE_ADDR_ENCODE(addr);
335 if (!(flags & PTE_READ_ONLY))
336 pte |= BYT_PTE_WRITEABLE;
338 if (level != I915_CACHE_NONE)
339 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
344 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
345 enum i915_cache_level level,
348 gen6_pte_t pte = GEN6_PTE_VALID;
349 pte |= HSW_PTE_ADDR_ENCODE(addr);
351 if (level != I915_CACHE_NONE)
352 pte |= HSW_WB_LLC_AGE3;
357 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
358 enum i915_cache_level level,
361 gen6_pte_t pte = GEN6_PTE_VALID;
362 pte |= HSW_PTE_ADDR_ENCODE(addr);
365 case I915_CACHE_NONE:
368 pte |= HSW_WT_ELLC_LLC_AGE3;
371 pte |= HSW_WB_ELLC_LLC_AGE3;
378 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
380 struct pagevec *pvec = &vm->free_pages;
381 struct pagevec stash;
383 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
384 i915_gem_shrink_all(vm->i915);
386 if (likely(pvec->nr))
387 return pvec->pages[--pvec->nr];
390 return alloc_page(gfp);
392 /* A placeholder for a specific mutex to guard the WC stash */
393 lockdep_assert_held(&vm->i915->drm.struct_mutex);
395 /* Look in our global stash of WC pages... */
396 pvec = &vm->i915->mm.wc_stash;
397 if (likely(pvec->nr))
398 return pvec->pages[--pvec->nr];
401 * Otherwise batch allocate pages to amortize the cost of set_pages_wc.
403 * We have to be careful as page allocation may trigger the shrinker
404 * (via direct reclaim) which will fill up the WC stash underneath us.
405 * So we add our WB pages into a temporary pvec on the stack and merge
406 * them into the WC stash after all the allocations are complete.
408 pagevec_init(&stash);
412 page = alloc_page(gfp);
416 stash.pages[stash.nr++] = page;
417 } while (stash.nr < pagevec_space(pvec));
420 int nr = min_t(int, stash.nr, pagevec_space(pvec));
421 struct page **pages = stash.pages + stash.nr - nr;
423 if (nr && !set_pages_array_wc(pages, nr)) {
424 memcpy(pvec->pages + pvec->nr,
425 pages, sizeof(pages[0]) * nr);
430 pagevec_release(&stash);
433 return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
436 static void vm_free_pages_release(struct i915_address_space *vm,
439 struct pagevec *pvec = &vm->free_pages;
441 GEM_BUG_ON(!pagevec_count(pvec));
443 if (vm->pt_kmap_wc) {
444 struct pagevec *stash = &vm->i915->mm.wc_stash;
446 /* When we use WC, first fill up the global stash and then,
447 * only if it is full, immediately free the overflow.
450 lockdep_assert_held(&vm->i915->drm.struct_mutex);
451 if (pagevec_space(stash)) {
453 stash->pages[stash->nr++] =
454 pvec->pages[--pvec->nr];
457 } while (pagevec_space(stash));
459 /* As we have made some room in the VM's free_pages,
460 * we can wait for it to fill again. Unless we are
461 * inside i915_address_space_fini() and must
462 * immediately release the pages!
468 set_pages_array_wb(pvec->pages, pvec->nr);
471 __pagevec_release(pvec);
474 static void vm_free_page(struct i915_address_space *vm, struct page *page)
477 * On !llc, we need to change the pages back to WB. We only do so
478 * in bulk, so we rarely need to change the page attributes here,
479 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
480 * To make detection of the possible sleep more likely, use an
481 * unconditional might_sleep() for everybody.
484 if (!pagevec_add(&vm->free_pages, page))
485 vm_free_pages_release(vm, false);
488 static int __setup_page_dma(struct i915_address_space *vm,
489 struct i915_page_dma *p,
492 p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
493 if (unlikely(!p->page))
496 p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
497 PCI_DMA_BIDIRECTIONAL);
498 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
499 vm_free_page(vm, p->page);
506 static int setup_page_dma(struct i915_address_space *vm,
507 struct i915_page_dma *p)
509 return __setup_page_dma(vm, p, __GFP_HIGHMEM);
512 static void cleanup_page_dma(struct i915_address_space *vm,
513 struct i915_page_dma *p)
515 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
516 vm_free_page(vm, p->page);
519 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
521 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
522 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
523 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
524 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
526 static void fill_page_dma(struct i915_address_space *vm,
527 struct i915_page_dma *p,
530 u64 * const vaddr = kmap_atomic(p->page);
532 memset64(vaddr, val, PAGE_SIZE / sizeof(val));
534 kunmap_atomic(vaddr);
537 static void fill_page_dma_32(struct i915_address_space *vm,
538 struct i915_page_dma *p,
541 fill_page_dma(vm, p, (u64)v << 32 | v);
545 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
550 * In order to utilize 64K pages for an object with a size < 2M, we will
551 * need to support a 64K scratch page, given that every 16th entry for a
552 * page-table operating in 64K mode must point to a properly aligned 64K
553 * region, including any PTEs which happen to point to scratch.
555 * This is only relevant for the 48b PPGTT where we support
556 * huge-gtt-pages, see also i915_vma_insert().
558 * TODO: we should really consider write-protecting the scratch-page and
559 * sharing between ppgtt
561 size = I915_GTT_PAGE_SIZE_4K;
562 if (i915_vm_is_48bit(vm) &&
563 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
564 size = I915_GTT_PAGE_SIZE_64K;
567 gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
570 int order = get_order(size);
574 page = alloc_pages(gfp, order);
578 addr = dma_map_page(vm->dma, page, 0, size,
579 PCI_DMA_BIDIRECTIONAL);
580 if (unlikely(dma_mapping_error(vm->dma, addr)))
583 if (unlikely(!IS_ALIGNED(addr, size)))
586 vm->scratch_page.page = page;
587 vm->scratch_page.daddr = addr;
588 vm->scratch_page.order = order;
592 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
594 __free_pages(page, order);
596 if (size == I915_GTT_PAGE_SIZE_4K)
599 size = I915_GTT_PAGE_SIZE_4K;
600 gfp &= ~__GFP_NOWARN;
604 static void cleanup_scratch_page(struct i915_address_space *vm)
606 struct i915_page_dma *p = &vm->scratch_page;
608 dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
609 PCI_DMA_BIDIRECTIONAL);
610 __free_pages(p->page, p->order);
613 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
615 struct i915_page_table *pt;
617 pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
619 return ERR_PTR(-ENOMEM);
621 if (unlikely(setup_px(vm, pt))) {
623 return ERR_PTR(-ENOMEM);
630 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
636 static void gen8_initialize_pt(struct i915_address_space *vm,
637 struct i915_page_table *pt)
640 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
643 static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
644 struct i915_page_table *pt)
646 fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
649 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
651 struct i915_page_directory *pd;
653 pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
655 return ERR_PTR(-ENOMEM);
657 if (unlikely(setup_px(vm, pd))) {
659 return ERR_PTR(-ENOMEM);
666 static void free_pd(struct i915_address_space *vm,
667 struct i915_page_directory *pd)
673 static void gen8_initialize_pd(struct i915_address_space *vm,
674 struct i915_page_directory *pd)
677 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
678 memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
681 static int __pdp_init(struct i915_address_space *vm,
682 struct i915_page_directory_pointer *pdp)
684 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
686 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
687 I915_GFP_ALLOW_FAIL);
688 if (unlikely(!pdp->page_directory))
691 memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
696 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
698 kfree(pdp->page_directory);
699 pdp->page_directory = NULL;
702 static inline bool use_4lvl(const struct i915_address_space *vm)
704 return i915_vm_is_48bit(vm);
707 static struct i915_page_directory_pointer *
708 alloc_pdp(struct i915_address_space *vm)
710 struct i915_page_directory_pointer *pdp;
713 GEM_BUG_ON(!use_4lvl(vm));
715 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
717 return ERR_PTR(-ENOMEM);
719 ret = __pdp_init(vm, pdp);
723 ret = setup_px(vm, pdp);
737 static void free_pdp(struct i915_address_space *vm,
738 struct i915_page_directory_pointer *pdp)
749 static void gen8_initialize_pdp(struct i915_address_space *vm,
750 struct i915_page_directory_pointer *pdp)
752 gen8_ppgtt_pdpe_t scratch_pdpe;
754 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
756 fill_px(vm, pdp, scratch_pdpe);
759 static void gen8_initialize_pml4(struct i915_address_space *vm,
760 struct i915_pml4 *pml4)
763 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
764 memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
767 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
768 * the page table structures, we mark them dirty so that
769 * context switching/execlist queuing code takes extra steps
770 * to ensure that tlbs are flushed.
772 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
774 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
777 /* Removes entries from a single page table, releasing it if it's empty.
778 * Caller can use the return value to update higher-level entries.
780 static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
781 struct i915_page_table *pt,
782 u64 start, u64 length)
784 unsigned int num_entries = gen8_pte_count(start, length);
785 unsigned int pte = gen8_pte_index(start);
786 unsigned int pte_end = pte + num_entries;
787 const gen8_pte_t scratch_pte =
788 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
791 GEM_BUG_ON(num_entries > pt->used_ptes);
793 pt->used_ptes -= num_entries;
797 vaddr = kmap_atomic_px(pt);
798 while (pte < pte_end)
799 vaddr[pte++] = scratch_pte;
800 kunmap_atomic(vaddr);
805 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
806 struct i915_page_directory *pd,
807 struct i915_page_table *pt,
812 pd->page_table[pde] = pt;
814 vaddr = kmap_atomic_px(pd);
815 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
816 kunmap_atomic(vaddr);
819 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
820 struct i915_page_directory *pd,
821 u64 start, u64 length)
823 struct i915_page_table *pt;
826 gen8_for_each_pde(pt, pd, start, length, pde) {
827 GEM_BUG_ON(pt == vm->scratch_pt);
829 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
832 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
833 GEM_BUG_ON(!pd->used_pdes);
839 return !pd->used_pdes;
842 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
843 struct i915_page_directory_pointer *pdp,
844 struct i915_page_directory *pd,
847 gen8_ppgtt_pdpe_t *vaddr;
849 pdp->page_directory[pdpe] = pd;
853 vaddr = kmap_atomic_px(pdp);
854 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
855 kunmap_atomic(vaddr);
858 /* Removes entries from a single page dir pointer, releasing it if it's empty.
859 * Caller can use the return value to update higher-level entries
861 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
862 struct i915_page_directory_pointer *pdp,
863 u64 start, u64 length)
865 struct i915_page_directory *pd;
868 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
869 GEM_BUG_ON(pd == vm->scratch_pd);
871 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
874 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
875 GEM_BUG_ON(!pdp->used_pdpes);
881 return !pdp->used_pdpes;
884 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
885 u64 start, u64 length)
887 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
890 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
891 struct i915_page_directory_pointer *pdp,
894 gen8_ppgtt_pml4e_t *vaddr;
896 pml4->pdps[pml4e] = pdp;
898 vaddr = kmap_atomic_px(pml4);
899 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
900 kunmap_atomic(vaddr);
903 /* Removes entries from a single pml4.
904 * This is the top-level structure in 4-level page tables used on gen8+.
905 * Empty entries are always scratch pml4e.
907 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
908 u64 start, u64 length)
910 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
911 struct i915_pml4 *pml4 = &ppgtt->pml4;
912 struct i915_page_directory_pointer *pdp;
915 GEM_BUG_ON(!use_4lvl(vm));
917 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
918 GEM_BUG_ON(pdp == vm->scratch_pdp);
920 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
923 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
929 static inline struct sgt_dma {
930 struct scatterlist *sg;
932 } sgt_dma(struct i915_vma *vma) {
933 struct scatterlist *sg = vma->pages->sgl;
934 dma_addr_t addr = sg_dma_address(sg);
935 return (struct sgt_dma) { sg, addr, addr + sg->length };
938 struct gen8_insert_pte {
945 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
947 return (struct gen8_insert_pte) {
948 gen8_pml4e_index(start),
949 gen8_pdpe_index(start),
950 gen8_pde_index(start),
951 gen8_pte_index(start),
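/*
 * Worked example (illustrative, assuming 4K GTT pages): the decomposition
 * above is the usual 9/9/9/9+12 split of a 48b GTT offset,
 *
 *	pml4e = (start >> 39) & 0x1ff
 *	pdpe  = (start >> 30) & 0x1ff
 *	pde   = (start >> 21) & 0x1ff
 *	pte   = (start >> 12) & 0x1ff
 *
 * so start = 0x8080604000 decomposes to { pml4e 1, pdpe 2, pde 3, pte 4 }.
 */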
955 static __always_inline bool
956 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
957 struct i915_page_directory_pointer *pdp,
958 struct sgt_dma *iter,
959 struct gen8_insert_pte *idx,
960 enum i915_cache_level cache_level)
962 struct i915_page_directory *pd;
963 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
967 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
968 pd = pdp->page_directory[idx->pdpe];
969 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
971 vaddr[idx->pte] = pte_encode | iter->dma;
973 iter->dma += PAGE_SIZE;
974 if (iter->dma >= iter->max) {
975 iter->sg = __sg_next(iter->sg);
981 iter->dma = sg_dma_address(iter->sg);
982 iter->max = iter->dma + iter->sg->length;
985 if (++idx->pte == GEN8_PTES) {
988 if (++idx->pde == I915_PDES) {
991 /* Limited by sg length for 3lvl */
992 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
998 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
999 pd = pdp->page_directory[idx->pdpe];
1002 kunmap_atomic(vaddr);
1003 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1006 kunmap_atomic(vaddr);
1011 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1012 struct i915_vma *vma,
1013 enum i915_cache_level cache_level,
1016 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1017 struct sgt_dma iter = sgt_dma(vma);
1018 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1020 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1023 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1026 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1027 struct i915_page_directory_pointer **pdps,
1028 struct sgt_dma *iter,
1029 enum i915_cache_level cache_level)
1031 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1032 u64 start = vma->node.start;
1033 dma_addr_t rem = iter->sg->length;
1036 struct gen8_insert_pte idx = gen8_insert_pte(start);
1037 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1038 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1039 unsigned int page_size;
1040 bool maybe_64K = false;
1041 gen8_pte_t encode = pte_encode;
1045 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1046 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1047 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1050 page_size = I915_GTT_PAGE_SIZE_2M;
1052 encode |= GEN8_PDE_PS_2M;
1054 vaddr = kmap_atomic_px(pd);
1056 struct i915_page_table *pt = pd->page_table[idx.pde];
1060 page_size = I915_GTT_PAGE_SIZE;
1063 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1064 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1065 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1066 rem >= (max - index) << PAGE_SHIFT))
1069 vaddr = kmap_atomic_px(pt);
1073 GEM_BUG_ON(iter->sg->length < page_size);
1074 vaddr[index++] = encode | iter->dma;
1077 iter->dma += page_size;
1079 if (iter->dma >= iter->max) {
1080 iter->sg = __sg_next(iter->sg);
1084 rem = iter->sg->length;
1085 iter->dma = sg_dma_address(iter->sg);
1086 iter->max = iter->dma + rem;
1088 if (maybe_64K && index < max &&
1089 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1090 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1091 rem >= (max - index) << PAGE_SHIFT)))
1094 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1097 } while (rem >= page_size && index < max);
1099 kunmap_atomic(vaddr);
1102 * Is it safe to mark the 2M block as 64K? -- Either we have
1103 * filled whole page-table with 64K entries, or filled part of
1104 * it and have reached the end of the sg table and we have
1109 (i915_vm_has_scratch_64K(vma->vm) &&
1110 !iter->sg && IS_ALIGNED(vma->node.start +
1112 I915_GTT_PAGE_SIZE_2M)))) {
1113 vaddr = kmap_atomic_px(pd);
1114 vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1115 kunmap_atomic(vaddr);
1116 page_size = I915_GTT_PAGE_SIZE_64K;
1119 * We write all 4K page entries, even when using 64K
1120 * pages. In order to verify that the HW isn't cheating
1121 * by using the 4K PTE instead of the 64K PTE, we want
1122 * to remove all the surplus entries. If the HW skipped
1123 * the 64K PTE, it will read/write into the scratch page
1124 * instead - which we detect as missing results during
1127 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1130 encode = pte_encode | vma->vm->scratch_page.daddr;
1131 vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1133 for (i = 1; i < index; i += 16)
1134 memset64(vaddr + i, encode, 15);
1136 kunmap_atomic(vaddr);
1140 vma->page_sizes.gtt |= page_size;
1144 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1145 struct i915_vma *vma,
1146 enum i915_cache_level cache_level,
1149 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1150 struct sgt_dma iter = sgt_dma(vma);
1151 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1153 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1154 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
1156 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1158 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1159 &iter, &idx, cache_level))
1160 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1162 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1166 static void gen8_free_page_tables(struct i915_address_space *vm,
1167 struct i915_page_directory *pd)
1174 for (i = 0; i < I915_PDES; i++) {
1175 if (pd->page_table[i] != vm->scratch_pt)
1176 free_pt(vm, pd->page_table[i]);
1180 static int gen8_init_scratch(struct i915_address_space *vm)
1184 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1188 vm->scratch_pt = alloc_pt(vm);
1189 if (IS_ERR(vm->scratch_pt)) {
1190 ret = PTR_ERR(vm->scratch_pt);
1191 goto free_scratch_page;
1194 vm->scratch_pd = alloc_pd(vm);
1195 if (IS_ERR(vm->scratch_pd)) {
1196 ret = PTR_ERR(vm->scratch_pd);
1201 vm->scratch_pdp = alloc_pdp(vm);
1202 if (IS_ERR(vm->scratch_pdp)) {
1203 ret = PTR_ERR(vm->scratch_pdp);
1208 gen8_initialize_pt(vm, vm->scratch_pt);
1209 gen8_initialize_pd(vm, vm->scratch_pd);
1211 gen8_initialize_pdp(vm, vm->scratch_pdp);
1216 free_pd(vm, vm->scratch_pd);
1218 free_pt(vm, vm->scratch_pt);
1220 cleanup_scratch_page(vm);
1225 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1227 struct i915_address_space *vm = &ppgtt->vm;
1228 struct drm_i915_private *dev_priv = vm->i915;
1229 enum vgt_g2v_type msg;
1233 const u64 daddr = px_dma(&ppgtt->pml4);
1235 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1236 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1238 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1239 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1241 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1242 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1244 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1245 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1248 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1249 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1252 I915_WRITE(vgtif_reg(g2v_notify), msg);
1257 static void gen8_free_scratch(struct i915_address_space *vm)
1260 free_pdp(vm, vm->scratch_pdp);
1261 free_pd(vm, vm->scratch_pd);
1262 free_pt(vm, vm->scratch_pt);
1263 cleanup_scratch_page(vm);
1266 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1267 struct i915_page_directory_pointer *pdp)
1269 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1272 for (i = 0; i < pdpes; i++) {
1273 if (pdp->page_directory[i] == vm->scratch_pd)
1276 gen8_free_page_tables(vm, pdp->page_directory[i]);
1277 free_pd(vm, pdp->page_directory[i]);
1283 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1287 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1288 if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
1291 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
1294 cleanup_px(&ppgtt->vm, &ppgtt->pml4);
1297 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1299 struct drm_i915_private *dev_priv = vm->i915;
1300 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1302 if (intel_vgpu_active(dev_priv))
1303 gen8_ppgtt_notify_vgt(ppgtt, false);
1306 gen8_ppgtt_cleanup_4lvl(ppgtt);
1308 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
1310 gen8_free_scratch(vm);
1313 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1314 struct i915_page_directory *pd,
1315 u64 start, u64 length)
1317 struct i915_page_table *pt;
1321 gen8_for_each_pde(pt, pd, start, length, pde) {
1322 int count = gen8_pte_count(start, length);
1324 if (pt == vm->scratch_pt) {
1333 if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1334 gen8_initialize_pt(vm, pt);
1336 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1337 GEM_BUG_ON(pd->used_pdes > I915_PDES);
1340 pt->used_ptes += count;
1345 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1349 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1350 struct i915_page_directory_pointer *pdp,
1351 u64 start, u64 length)
1353 struct i915_page_directory *pd;
1358 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1359 if (pd == vm->scratch_pd) {
1368 gen8_initialize_pd(vm, pd);
1369 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1370 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1372 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
1375 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1383 if (!pd->used_pdes) {
1384 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1385 GEM_BUG_ON(!pdp->used_pdpes);
1390 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1394 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1395 u64 start, u64 length)
1397 return gen8_ppgtt_alloc_pdp(vm,
1398 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1401 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1402 u64 start, u64 length)
1404 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1405 struct i915_pml4 *pml4 = &ppgtt->pml4;
1406 struct i915_page_directory_pointer *pdp;
1411 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1412 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1413 pdp = alloc_pdp(vm);
1417 gen8_initialize_pdp(vm, pdp);
1418 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1421 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1429 if (!pdp->used_pdpes) {
1430 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1434 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1438 static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1439 struct i915_page_directory_pointer *pdp,
1440 u64 start, u64 length,
1441 gen8_pte_t scratch_pte,
1444 struct i915_address_space *vm = &ppgtt->vm;
1445 struct i915_page_directory *pd;
1448 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1449 struct i915_page_table *pt;
1450 u64 pd_len = length;
1451 u64 pd_start = start;
1454 if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
1457 seq_printf(m, "\tPDPE #%d\n", pdpe);
1458 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1460 gen8_pte_t *pt_vaddr;
1462 if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
1465 pt_vaddr = kmap_atomic_px(pt);
1466 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1467 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1468 pde << GEN8_PDE_SHIFT |
1469 pte << GEN8_PTE_SHIFT);
1473 for (i = 0; i < 4; i++)
1474 if (pt_vaddr[pte + i] != scratch_pte)
1479 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1480 for (i = 0; i < 4; i++) {
1481 if (pt_vaddr[pte + i] != scratch_pte)
1482 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1484 seq_puts(m, " SCRATCH ");
1488 kunmap_atomic(pt_vaddr);
1493 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1495 struct i915_address_space *vm = &ppgtt->vm;
1496 const gen8_pte_t scratch_pte =
1497 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
1498 u64 start = 0, length = ppgtt->vm.total;
1502 struct i915_pml4 *pml4 = &ppgtt->pml4;
1503 struct i915_page_directory_pointer *pdp;
1505 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1506 if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
1509 seq_printf(m, " PML4E #%llu\n", pml4e);
1510 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
1513 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
1517 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1519 struct i915_address_space *vm = &ppgtt->vm;
1520 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1521 struct i915_page_directory *pd;
1522 u64 start = 0, length = ppgtt->vm.total;
1526 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1531 gen8_initialize_pd(vm, pd);
1532 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1536 pdp->used_pdpes++; /* never remove */
1541 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1542 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1545 pdp->used_pdpes = 0;
1550 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1551 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1552 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
1556 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1558 struct i915_hw_ppgtt *ppgtt;
1561 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1563 return ERR_PTR(-ENOMEM);
1565 ppgtt->vm.i915 = i915;
1566 ppgtt->vm.dma = &i915->drm.pdev->dev;
1568 ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1572 /* There are only a few exceptions for gen >= 6: chv and bxt.
1573 * And we are not sure about the latter, so play safe for now.
1575 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1576 ppgtt->vm.pt_kmap_wc = true;
1578 err = gen8_init_scratch(&ppgtt->vm);
1582 if (use_4lvl(&ppgtt->vm)) {
1583 err = setup_px(&ppgtt->vm, &ppgtt->pml4);
1587 gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1589 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1590 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1591 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1593 err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
1597 if (intel_vgpu_active(i915)) {
1598 err = gen8_preallocate_top_level_pdp(ppgtt);
1600 __pdp_fini(&ppgtt->pdp);
1605 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1606 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1607 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1610 if (intel_vgpu_active(i915))
1611 gen8_ppgtt_notify_vgt(ppgtt, true);
1613 ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1614 ppgtt->debug_dump = gen8_dump_ppgtt;
1616 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
1617 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
1618 ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
1619 ppgtt->vm.vma_ops.clear_pages = clear_pages;
1624 gen8_free_scratch(&ppgtt->vm);
1627 return ERR_PTR(err);
1630 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1632 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
1633 const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
1634 struct i915_page_table *pt;
1637 gen6_for_all_pdes(pt, &base->pd, pde) {
1640 if (pt == base->vm.scratch_pt)
1643 if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
1645 GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
1647 u32 pd_entry = readl(ppgtt->pd_addr + pde);
1649 if (pd_entry != expected)
1651 "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1656 seq_printf(m, "\tPDE: %x\n", pd_entry);
1659 vaddr = kmap_atomic_px(base->pd.page_table[pde]);
1660 for (pte = 0; pte < GEN6_PTES; pte += 4) {
1663 for (i = 0; i < 4; i++)
1664 if (vaddr[pte + i] != scratch_pte)
1669 seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
1671 (pde * GEN6_PTES + pte) * PAGE_SIZE);
1672 for (i = 0; i < 4; i++) {
1673 if (vaddr[pte + i] != scratch_pte)
1674 seq_printf(m, " %08x", vaddr[pte + i]);
1676 seq_puts(m, " SCRATCH");
1680 kunmap_atomic(vaddr);
1684 /* Write pde (index) from the page directory @pd to the page table @pt */
1685 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1686 const unsigned int pde,
1687 const struct i915_page_table *pt)
1689 /* Caller needs to make sure the write completes if necessary */
1690 iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1691 ppgtt->pd_addr + pde);
1694 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1696 struct intel_engine_cs *engine;
1697 enum intel_engine_id id;
1699 for_each_engine(engine, dev_priv, id) {
1700 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1701 GEN8_GFX_PPGTT_48B : 0;
1702 I915_WRITE(RING_MODE_GEN7(engine),
1703 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1707 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1709 struct intel_engine_cs *engine;
1710 u32 ecochk, ecobits;
1711 enum intel_engine_id id;
1713 ecobits = I915_READ(GAC_ECO_BITS);
1714 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1716 ecochk = I915_READ(GAM_ECOCHK);
1717 if (IS_HASWELL(dev_priv)) {
1718 ecochk |= ECOCHK_PPGTT_WB_HSW;
1720 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1721 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1723 I915_WRITE(GAM_ECOCHK, ecochk);
1725 for_each_engine(engine, dev_priv, id) {
1726 /* GFX_MODE is per-ring on gen7+ */
1727 I915_WRITE(RING_MODE_GEN7(engine),
1728 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1732 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1734 u32 ecochk, gab_ctl, ecobits;
1736 ecobits = I915_READ(GAC_ECO_BITS);
1737 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1738 ECOBITS_PPGTT_CACHE64B);
1740 gab_ctl = I915_READ(GAB_CTL);
1741 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1743 ecochk = I915_READ(GAM_ECOCHK);
1744 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1746 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1749 /* PPGTT support for Sandybridge/Gen6 and later */
1750 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1751 u64 start, u64 length)
1753 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1754 unsigned int first_entry = start >> PAGE_SHIFT;
1755 unsigned int pde = first_entry / GEN6_PTES;
1756 unsigned int pte = first_entry % GEN6_PTES;
1757 unsigned int num_entries = length >> PAGE_SHIFT;
1758 const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
1760 while (num_entries) {
1761 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1762 const unsigned int end = min(pte + num_entries, GEN6_PTES);
1763 const unsigned int count = end - pte;
1766 GEM_BUG_ON(pt == vm->scratch_pt);
1768 num_entries -= count;
1770 GEM_BUG_ON(count > pt->used_ptes);
1771 pt->used_ptes -= count;
1773 ppgtt->scan_for_unused_pt = true;
1776 * Note that the hw doesn't support removing PDE on the fly
1777 * (they are cached inside the context with no means to
1778 * invalidate the cache), so we can only reset the PTE
1779 * entries back to scratch.
1782 vaddr = kmap_atomic_px(pt);
1784 vaddr[pte++] = scratch_pte;
1785 } while (pte < end);
1786 kunmap_atomic(vaddr);
1792 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1793 struct i915_vma *vma,
1794 enum i915_cache_level cache_level,
1797 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1798 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
1799 unsigned act_pt = first_entry / GEN6_PTES;
1800 unsigned act_pte = first_entry % GEN6_PTES;
1801 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1802 struct sgt_dma iter = sgt_dma(vma);
1805 GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
1807 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1809 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1811 iter.dma += PAGE_SIZE;
1812 if (iter.dma == iter.max) {
1813 iter.sg = __sg_next(iter.sg);
1817 iter.dma = sg_dma_address(iter.sg);
1818 iter.max = iter.dma + iter.sg->length;
1821 if (++act_pte == GEN6_PTES) {
1822 kunmap_atomic(vaddr);
1823 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1827 kunmap_atomic(vaddr);
1829 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1832 static int gen6_alloc_va_range(struct i915_address_space *vm,
1833 u64 start, u64 length)
1835 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1836 struct i915_page_table *pt;
1841 gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1842 const unsigned int count = gen6_pte_count(start, length);
1844 if (pt == vm->scratch_pt) {
1849 gen6_initialize_pt(ppgtt, pt);
1850 ppgtt->base.pd.page_table[pde] = pt;
1852 if (i915_vma_is_bound(ppgtt->vma,
1853 I915_VMA_GLOBAL_BIND)) {
1854 gen6_write_pde(ppgtt, pde, pt);
1858 GEM_BUG_ON(pt->used_ptes);
1861 pt->used_ptes += count;
1865 mark_tlbs_dirty(&ppgtt->base);
1866 gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1872 gen6_ppgtt_clear_range(vm, from, start - from);
1876 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1878 struct i915_address_space * const vm = &ppgtt->base.vm;
1879 struct i915_page_table *unused;
1883 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1887 ppgtt->scratch_pte =
1888 vm->pte_encode(vm->scratch_page.daddr,
1889 I915_CACHE_NONE, PTE_READ_ONLY);
1891 vm->scratch_pt = alloc_pt(vm);
1892 if (IS_ERR(vm->scratch_pt)) {
1893 cleanup_scratch_page(vm);
1894 return PTR_ERR(vm->scratch_pt);
1897 gen6_initialize_pt(ppgtt, vm->scratch_pt);
1898 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1899 ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1904 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1906 free_pt(vm, vm->scratch_pt);
1907 cleanup_scratch_page(vm);
1910 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
1912 struct i915_page_table *pt;
1915 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1916 if (pt != ppgtt->base.vm.scratch_pt)
1917 free_pt(&ppgtt->base.vm, pt);
1920 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1922 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1924 i915_vma_destroy(ppgtt->vma);
1926 gen6_ppgtt_free_pd(ppgtt);
1927 gen6_ppgtt_free_scratch(vm);
1930 static int pd_vma_set_pages(struct i915_vma *vma)
1932 vma->pages = ERR_PTR(-ENODEV);
1936 static void pd_vma_clear_pages(struct i915_vma *vma)
1938 GEM_BUG_ON(!vma->pages);
1943 static int pd_vma_bind(struct i915_vma *vma,
1944 enum i915_cache_level cache_level,
1947 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1948 struct gen6_hw_ppgtt *ppgtt = vma->private;
1949 u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
1950 struct i915_page_table *pt;
1953 ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1954 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1956 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1957 gen6_write_pde(ppgtt, pde, pt);
1959 mark_tlbs_dirty(&ppgtt->base);
1960 gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1965 static void pd_vma_unbind(struct i915_vma *vma)
1967 struct gen6_hw_ppgtt *ppgtt = vma->private;
1968 struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
1969 struct i915_page_table *pt;
1972 if (!ppgtt->scan_for_unused_pt)
1975 /* Free all no longer used page tables */
1976 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
1977 if (pt->used_ptes || pt == scratch_pt)
1980 free_pt(&ppgtt->base.vm, pt);
1981 ppgtt->base.pd.page_table[pde] = scratch_pt;
1984 ppgtt->scan_for_unused_pt = false;
1987 static const struct i915_vma_ops pd_vma_ops = {
1988 .set_pages = pd_vma_set_pages,
1989 .clear_pages = pd_vma_clear_pages,
1990 .bind_vma = pd_vma_bind,
1991 .unbind_vma = pd_vma_unbind,
1994 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
1996 struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1997 struct i915_ggtt *ggtt = &i915->ggtt;
1998 struct i915_vma *vma;
2001 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
2002 GEM_BUG_ON(size > ggtt->vm.total);
2004 vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
2006 return ERR_PTR(-ENOMEM);
2008 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
2009 init_request_active(&vma->last_read[i], NULL);
2010 init_request_active(&vma->last_fence, NULL);
2012 vma->vm = &ggtt->vm;
2013 vma->ops = &pd_vma_ops;
2014 vma->private = ppgtt;
2017 vma->fence_size = size;
2018 vma->flags = I915_VMA_GGTT;
2019 vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
2021 INIT_LIST_HEAD(&vma->obj_link);
2022 list_add(&vma->vm_link, &vma->vm->unbound_list);
2027 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2029 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2032 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
2033 * which will be pinned into every active context.
2034 * (When vma->pin_count becomes atomic, I expect we will naturally
2035 * need a larger, unpacked, type and kill this redundancy.)
2037 if (ppgtt->pin_count++)
2041 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2042 * allocator works in address space sizes, so it's multiplied by page
2043 * size. We allocate at the top of the GTT to avoid fragmentation.
2045 return i915_vma_pin(ppgtt->vma,
2047 PIN_GLOBAL | PIN_HIGH);
2050 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
2052 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2054 GEM_BUG_ON(!ppgtt->pin_count);
2055 if (--ppgtt->pin_count)
2058 i915_vma_unpin(ppgtt->vma);
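/*
 * Illustrative usage sketch (caller context assumed, not taken from this
 * file): users of a gen6 ppgtt bracket the period in which its page
 * directory must stay resident in the GGTT with the pin/unpin pair:
 *
 *	struct i915_hw_ppgtt *ppgtt = ...;
 *	int err;
 *
 *	err = gen6_ppgtt_pin(ppgtt);
 *	if (err)
 *		return err;
 *	... submit work that walks this ppgtt ...
 *	gen6_ppgtt_unpin(ppgtt);
 *
 * Because of the pin_count above, only the first pin actually binds the
 * page-directory vma and only the last unpin releases it.
 */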
2061 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2063 struct i915_ggtt * const ggtt = &i915->ggtt;
2064 struct gen6_hw_ppgtt *ppgtt;
2067 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2069 return ERR_PTR(-ENOMEM);
2071 ppgtt->base.vm.i915 = i915;
2072 ppgtt->base.vm.dma = &i915->drm.pdev->dev;
2074 ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2076 ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2077 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2078 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2079 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2080 ppgtt->base.debug_dump = gen6_dump_ppgtt;
2082 ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
2083 ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
2084 ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
2085 ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
2087 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2089 err = gen6_ppgtt_init_scratch(ppgtt);
2093 ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2094 if (IS_ERR(ppgtt->vma)) {
2095 err = PTR_ERR(ppgtt->vma);
2099 return &ppgtt->base;
2102 gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2105 return ERR_PTR(err);
2108 static void i915_address_space_init(struct i915_address_space *vm,
2109 struct drm_i915_private *dev_priv,
2112 drm_mm_init(&vm->mm, 0, vm->total);
2113 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2115 INIT_LIST_HEAD(&vm->active_list);
2116 INIT_LIST_HEAD(&vm->inactive_list);
2117 INIT_LIST_HEAD(&vm->unbound_list);
2119 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2120 pagevec_init(&vm->free_pages);
2123 static void i915_address_space_fini(struct i915_address_space *vm)
2125 if (pagevec_count(&vm->free_pages))
2126 vm_free_pages_release(vm, true);
2128 drm_mm_takedown(&vm->mm);
2129 list_del(&vm->global_link);
2132 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2134 /* This function is for GTT related workarounds. It is called on
2135 * driver load and after a GPU reset, so you can place
2136 * workarounds here even if they get overwritten by a GPU reset.
2138 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2139 if (IS_BROADWELL(dev_priv))
2140 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2141 else if (IS_CHERRYVIEW(dev_priv))
2142 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2143 else if (IS_GEN9_LP(dev_priv))
2144 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2145 else if (INTEL_GEN(dev_priv) >= 9)
2146 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2149 * To support 64K PTEs we need to first enable the use of the
2150 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2151 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2152 * shouldn't be needed after GEN10.
2154 * 64K pages were first introduced from BDW+, although technically they
2155 * only *work* from gen9+. For pre-BDW we instead have the option for
2156 * 32K pages, but we don't currently have any support for it in our
2159 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2160 INTEL_GEN(dev_priv) <= 10)
2161 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2162 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2163 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2166 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2168 gtt_write_workarounds(dev_priv);
2170 /* In the case of execlists, PPGTT is enabled by the context descriptor
2171 * and the PDPs are contained within the context itself. We don't
2172 * need to do anything here. */
2173 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
2176 if (!USES_PPGTT(dev_priv))
2179 if (IS_GEN6(dev_priv))
2180 gen6_ppgtt_enable(dev_priv);
2181 else if (IS_GEN7(dev_priv))
2182 gen7_ppgtt_enable(dev_priv);
2183 else if (INTEL_GEN(dev_priv) >= 8)
2184 gen8_ppgtt_enable(dev_priv);
2186 MISSING_CASE(INTEL_GEN(dev_priv));
2191 static struct i915_hw_ppgtt *
2192 __hw_ppgtt_create(struct drm_i915_private *i915)
2194 if (INTEL_GEN(i915) < 8)
2195 return gen6_ppgtt_create(i915);
2197 return gen8_ppgtt_create(i915);
2200 struct i915_hw_ppgtt *
2201 i915_ppgtt_create(struct drm_i915_private *i915,
2202 struct drm_i915_file_private *fpriv,
2205 struct i915_hw_ppgtt *ppgtt;
2207 ppgtt = __hw_ppgtt_create(i915);
2211 kref_init(&ppgtt->ref);
2212 i915_address_space_init(&ppgtt->vm, i915, name);
2213 ppgtt->vm.file = fpriv;
2215 trace_i915_ppgtt_create(&ppgtt->vm);
2220 void i915_ppgtt_close(struct i915_address_space *vm)
2222 GEM_BUG_ON(vm->closed);
2226 static void ppgtt_destroy_vma(struct i915_address_space *vm)
2228 struct list_head *phases[] = {
2236 for (phase = phases; *phase; phase++) {
2237 struct i915_vma *vma, *vn;
2239 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2240 i915_vma_destroy(vma);
2244 void i915_ppgtt_release(struct kref *kref)
2246 struct i915_hw_ppgtt *ppgtt =
2247 container_of(kref, struct i915_hw_ppgtt, ref);
2249 trace_i915_ppgtt_release(&ppgtt->vm);
2251 ppgtt_destroy_vma(&ppgtt->vm);
2253 GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
2254 GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
2255 GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
2257 ppgtt->vm.cleanup(&ppgtt->vm);
2258 i915_address_space_fini(&ppgtt->vm);
2262 /* Certain Gen5 chipsets require idling the GPU before
2263 * unmapping anything from the GTT when VT-d is enabled.
2265 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2267 /* Query intel_iommu to see if we need the workaround. Presumably that
2270 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
2273 static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
2275 struct intel_engine_cs *engine;
2276 enum intel_engine_id id;
2279 for_each_engine(engine, dev_priv, id) {
2280 fault = I915_READ(RING_FAULT_REG(engine));
2281 if (fault & RING_FAULT_VALID) {
2282 DRM_DEBUG_DRIVER("Unexpected fault\n"
2284 "\tAddress space: %s\n"
2288 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2289 RING_FAULT_SRCID(fault),
2290 RING_FAULT_FAULT_TYPE(fault));
2291 I915_WRITE(RING_FAULT_REG(engine),
2292 fault & ~RING_FAULT_VALID);
2296 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
2299 static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
2301 u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2303 if (fault & RING_FAULT_VALID) {
2304 u32 fault_data0, fault_data1;
2307 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2308 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2309 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2310 ((u64)fault_data0 << 12);
2312 DRM_DEBUG_DRIVER("Unexpected fault\n"
2313 "\tAddr: 0x%08x_%08x\n"
2314 "\tAddress space: %s\n"
2318 upper_32_bits(fault_addr),
2319 lower_32_bits(fault_addr),
2320 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2321 GEN8_RING_FAULT_ENGINE_ID(fault),
2322 RING_FAULT_SRCID(fault),
2323 RING_FAULT_FAULT_TYPE(fault));
2324 I915_WRITE(GEN8_RING_FAULT_REG,
2325 fault & ~RING_FAULT_VALID);
2328 POSTING_READ(GEN8_RING_FAULT_REG);
2331 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2333 /* From GEN8 onwards we only have one 'All Engine Fault Register' */
2334 if (INTEL_GEN(dev_priv) >= 8)
2335 gen8_check_and_clear_faults(dev_priv);
2336 else if (INTEL_GEN(dev_priv) >= 6)
2337 gen6_check_and_clear_faults(dev_priv);
2342 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2344 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2346 /* Don't bother messing with faults pre GEN6 as we have little
2347 * documentation supporting that it's a good idea.
2349 if (INTEL_GEN(dev_priv) < 6)
2352 i915_check_and_clear_faults(dev_priv);
2354 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2356 i915_ggtt_invalidate(dev_priv);
2359 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2360 struct sg_table *pages)
2363 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2364 pages->sgl, pages->nents,
2365 PCI_DMA_BIDIRECTIONAL,
2369 /* If the DMA remap fails, one cause can be that we have
2370 * too many objects pinned in a small remapping table,
2371 * such as swiotlb. Incrementally purge all other objects and
2372 * try again - if there are no more pages to remove from
2373 * the DMA remapper, i915_gem_shrink will return 0.
2375 GEM_BUG_ON(obj->mm.pages == pages);
2376 } while (i915_gem_shrink(to_i915(obj->base.dev),
2377 obj->base.size >> PAGE_SHIFT, NULL,
2379 I915_SHRINK_UNBOUND |
2380 I915_SHRINK_ACTIVE));
2385 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2390 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2393 enum i915_cache_level level,
2396 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2397 gen8_pte_t __iomem *pte =
2398 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2400 gen8_set_pte(pte, gen8_pte_encode(addr, level));
2402 ggtt->invalidate(vm->i915);
2405 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2406 struct i915_vma *vma,
2407 enum i915_cache_level level,
2410 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2411 struct sgt_iter sgt_iter;
2412 gen8_pte_t __iomem *gtt_entries;
2413 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
2416 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2417 gtt_entries += vma->node.start >> PAGE_SHIFT;
2418 for_each_sgt_dma(addr, sgt_iter, vma->pages)
2419 gen8_set_pte(gtt_entries++, pte_encode | addr);
2422 * We want to flush the TLBs only after we're certain all the PTE
2423 * updates have finished.
2425 ggtt->invalidate(vm->i915);
2428 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2431 enum i915_cache_level level,
2434 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2435 gen6_pte_t __iomem *pte =
2436 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2438 iowrite32(vm->pte_encode(addr, level, flags), pte);
2440 ggtt->invalidate(vm->i915);
2444 * Binds an object into the global gtt with the specified cache level. The object
2445 * will be accessible to the GPU via commands whose operands reference offsets
2446 * within the global GTT as well as accessible by the GPU through the GMADR
2447 * mapped BAR (dev_priv->mm.gtt->gtt).
2449 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2450 struct i915_vma *vma,
2451 enum i915_cache_level level,
2454 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2455 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2456 unsigned int i = vma->node.start >> PAGE_SHIFT;
2457 struct sgt_iter iter;
2459 for_each_sgt_dma(addr, iter, vma->pages)
2460 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2463 * We want to flush the TLBs only after we're certain all the PTE
2464 * updates have finished.
2466 ggtt->invalidate(vm->i915);
2469 static void nop_clear_range(struct i915_address_space *vm,
2470 u64 start, u64 length)
2474 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2475 u64 start, u64 length)
2477 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2478 unsigned first_entry = start >> PAGE_SHIFT;
2479 unsigned num_entries = length >> PAGE_SHIFT;
2480 const gen8_pte_t scratch_pte =
2481 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2482 gen8_pte_t __iomem *gtt_base =
2483 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2484 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2487 if (WARN(num_entries > max_entries,
2488 "First entry = %d; Num entries = %d (max=%d)\n",
2489 first_entry, num_entries, max_entries))
2490 num_entries = max_entries;
2492 for (i = 0; i < num_entries; i++)
2493 gen8_set_pte(&gtt_base[i], scratch_pte);
2496 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2498 struct drm_i915_private *dev_priv = vm->i915;
2501 * Make sure the internal GAM fifo has been cleared of all GTT
2502 * writes before exiting stop_machine(). This guarantees that
2503 * any aperture accesses waiting to start in another process
2504 * cannot back up behind the GTT writes causing a hang.
2505 * The register can be any arbitrary GAM register.
2507 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2510 struct insert_page {
2511 struct i915_address_space *vm;
2514 enum i915_cache_level level;
2517 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2519 struct insert_page *arg = _arg;
2521 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2522 bxt_vtd_ggtt_wa(arg->vm);
2527 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2530 enum i915_cache_level level,
2533 struct insert_page arg = { vm, addr, offset, level };
2535 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2538 struct insert_entries {
2539 struct i915_address_space *vm;
2540 struct i915_vma *vma;
2541 enum i915_cache_level level;
2544 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2546 struct insert_entries *arg = _arg;
2548 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
2549 bxt_vtd_ggtt_wa(arg->vm);
2554 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2555 struct i915_vma *vma,
2556 enum i915_cache_level level,
2559 struct insert_entries arg = { vm, vma, level };
2561 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2564 struct clear_range {
2565 struct i915_address_space *vm;
2570 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2572 struct clear_range *arg = _arg;
2574 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2575 bxt_vtd_ggtt_wa(arg->vm);
2580 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2584 struct clear_range arg = { vm, start, length };
2586 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2589 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2590 u64 start, u64 length)
2592 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2593 unsigned first_entry = start >> PAGE_SHIFT;
2594 unsigned num_entries = length >> PAGE_SHIFT;
2595 gen6_pte_t scratch_pte, __iomem *gtt_base =
2596 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2597 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2600 if (WARN(num_entries > max_entries,
2601 "First entry = %d; Num entries = %d (max=%d)\n",
2602 first_entry, num_entries, max_entries))
2603 num_entries = max_entries;
2605 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2606 I915_CACHE_LLC, 0);
2608 for (i = 0; i < num_entries; i++)
2609 iowrite32(scratch_pte, &gtt_base[i]);
2612 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2615 enum i915_cache_level cache_level,
2618 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2619 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2621 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2624 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2625 struct i915_vma *vma,
2626 enum i915_cache_level cache_level,
2629 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2630 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2632 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2636 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2637 u64 start, u64 length)
2639 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2642 static int ggtt_bind_vma(struct i915_vma *vma,
2643 enum i915_cache_level cache_level,
2646 struct drm_i915_private *i915 = vma->vm->i915;
2647 struct drm_i915_gem_object *obj = vma->obj;
2650 /* Currently applicable only to VLV */
2652 if (obj->gt_ro)
2653 pte_flags |= PTE_READ_ONLY;
2655 intel_runtime_pm_get(i915);
2656 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2657 intel_runtime_pm_put(i915);
2659 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2662 * Without aliasing PPGTT there's no difference between
2663 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2664 * upgrade to both bound if we bind either to avoid double-binding.
2666 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2671 static void ggtt_unbind_vma(struct i915_vma *vma)
2673 struct drm_i915_private *i915 = vma->vm->i915;
2675 intel_runtime_pm_get(i915);
2676 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2677 intel_runtime_pm_put(i915);
2680 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2681 enum i915_cache_level cache_level,
2684 struct drm_i915_private *i915 = vma->vm->i915;
2688 /* Currently applicable only to VLV */
2690 if (vma->obj->gt_ro)
2691 pte_flags |= PTE_READ_ONLY;
2693 if (flags & I915_VMA_LOCAL_BIND) {
2694 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2696 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2697 ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2704 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2708 if (flags & I915_VMA_GLOBAL_BIND) {
2709 intel_runtime_pm_get(i915);
2710 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2711 intel_runtime_pm_put(i915);
2717 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2719 struct drm_i915_private *i915 = vma->vm->i915;
2721 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2722 intel_runtime_pm_get(i915);
2723 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2724 intel_runtime_pm_put(i915);
2727 if (vma->flags & I915_VMA_LOCAL_BIND) {
2728 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2730 vm->clear_range(vm, vma->node.start, vma->size);
2734 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2735 struct sg_table *pages)
2737 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2738 struct device *kdev = &dev_priv->drm.pdev->dev;
2739 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2741 if (unlikely(ggtt->do_idle_maps)) {
2742 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2743 DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
2744 /* Wait a bit, in hopes it avoids the hang */
2749 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2752 static int ggtt_set_pages(struct i915_vma *vma)
2756 GEM_BUG_ON(vma->pages);
2758 ret = i915_get_ggtt_vma_pages(vma);
2762 vma->page_sizes = vma->obj->mm.page_sizes;
2767 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2768 unsigned long color,
2772 if (node->allocated && node->color != color)
2773 *start += I915_GTT_PAGE_SIZE;
2775 /* Also leave a space between the unallocated reserved node after the
2776 * GTT and any objects within the GTT, i.e. we use the color adjustment
2777 * to insert a guard page to prevent prefetches crossing over the
2778 * object boundary.
2780 node = list_next_entry(node, node_list);
2781 if (node->color != color)
2782 *end -= I915_GTT_PAGE_SIZE;
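/*
 * Editorial example (not from the original source): if the allocator scans
 * a hole [0x10000, 0x20000) whose neighbouring nodes both carry a different
 * cache colour, the hole is shrunk to [0x11000, 0x1f000), leaving one
 * I915_GTT_PAGE_SIZE guard page against each neighbour.
 */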
2785 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2787 struct i915_ggtt *ggtt = &i915->ggtt;
2788 struct i915_hw_ppgtt *ppgtt;
2791 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
2792 if (IS_ERR(ppgtt))
2793 return PTR_ERR(ppgtt);
2795 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2801 * Note we only pre-allocate as far as the end of the global
2802 * GTT. On 48b / 4-level page-tables, the difference is very,
2803 * very significant! We have to preallocate as GVT/vgpu does
2804 * not like the page directory disappearing.
2806 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2810 i915->mm.aliasing_ppgtt = ppgtt;
2812 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2813 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2815 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2816 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2821 i915_ppgtt_put(ppgtt);
2825 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2827 struct i915_ggtt *ggtt = &i915->ggtt;
2828 struct i915_hw_ppgtt *ppgtt;
2830 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2834 i915_ppgtt_put(ppgtt);
2836 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
2837 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2840 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2842 /* Let GEM Manage all of the aperture.
2844 * However, leave one page at the end still bound to the scratch page.
2845 * There are a number of places where the hardware apparently prefetches
2846 * past the end of the object, and we've seen multiple hangs with the
2847 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2848 * aperture. One page should be enough to keep any prefetching inside
2849 * of the aperture.
2851 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2852 unsigned long hole_start, hole_end;
2853 struct drm_mm_node *entry;
2856 ret = intel_vgt_balloon(dev_priv);
2860 /* Reserve a mappable slot for our lockless error capture */
2861 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2862 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2863 0, ggtt->mappable_end,
2868 /* Clear any non-preallocated blocks */
2869 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2870 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2871 hole_start, hole_end);
2872 ggtt->vm.clear_range(&ggtt->vm, hole_start,
2873 hole_end - hole_start);
2876 /* And finally clear the reserved guard page */
2877 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2879 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2880 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2888 drm_mm_remove_node(&ggtt->error_capture);
2893 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2894 * @dev_priv: i915 device
2896 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2898 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2899 struct i915_vma *vma, *vn;
2900 struct pagevec *pvec;
2902 ggtt->vm.closed = true;
2904 mutex_lock(&dev_priv->drm.struct_mutex);
2905 i915_gem_fini_aliasing_ppgtt(dev_priv);
2907 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
2908 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
2909 WARN_ON(i915_vma_unbind(vma));
2911 if (drm_mm_node_allocated(&ggtt->error_capture))
2912 drm_mm_remove_node(&ggtt->error_capture);
2914 if (drm_mm_initialized(&ggtt->vm.mm)) {
2915 intel_vgt_deballoon(dev_priv);
2916 i915_address_space_fini(&ggtt->vm);
2919 ggtt->vm.cleanup(&ggtt->vm);
2921 pvec = &dev_priv->mm.wc_stash;
2923 set_pages_array_wb(pvec->pages, pvec->nr);
2924 __pagevec_release(pvec);
2927 mutex_unlock(&dev_priv->drm.struct_mutex);
2929 arch_phys_wc_del(ggtt->mtrr);
2930 io_mapping_fini(&ggtt->iomap);
2932 i915_gem_cleanup_stolen(&dev_priv->drm);
2935 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2937 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2938 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2939 return snb_gmch_ctl << 20;
2942 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2944 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2945 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2947 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2949 #ifdef CONFIG_X86_32
2950 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2951 if (bdw_gmch_ctl > 4)
2952 bdw_gmch_ctl = 4;
2953 #endif
2955 return bdw_gmch_ctl << 20;
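/*
 * Editorial worked example (not from the original source): a GGMS field of
 * 3 decodes to 1 << 3 = 8 MiB of PTE space; with 8-byte gen8 PTEs each
 * mapping a 4 KiB page, gen8_gmch_probe() turns that into an
 * (8 MiB / 8) << PAGE_SHIFT = 4 GiB GGTT. The CONFIG_X86_32 clamp above
 * caps the PTE space at 4 MiB, i.e. a 2 GiB GGTT.
 */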
2958 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2960 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2961 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2964 return 1 << (20 + gmch_ctrl);
2969 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2971 struct drm_i915_private *dev_priv = ggtt->vm.i915;
2972 struct pci_dev *pdev = dev_priv->drm.pdev;
2973 phys_addr_t phys_addr;
2976 /* For Modern GENs the PTEs and register space are split in the BAR */
2977 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2980 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2981 * will be dropped. For WC mappings in general we have 64 byte burst
2982 * writes when the WC buffer is flushed, so we can't use it, but have to
2983 * resort to an uncached mapping. The WC issue is easily caught by the
2984 * readback check when writing GTT PTE entries.
2986 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2987 ggtt->gsm = ioremap_nocache(phys_addr, size);
2988 else
2989 ggtt->gsm = ioremap_wc(phys_addr, size);
2991 DRM_ERROR("Failed to map the ggtt page table\n");
2995 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2997 DRM_ERROR("Scratch setup failed\n");
2998 /* iounmap will also get called at remove, but meh */
3006 static struct intel_ppat_entry *
3007 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
3009 struct intel_ppat_entry *entry = &ppat->entries[index];
3011 GEM_BUG_ON(index >= ppat->max_entries);
3012 GEM_BUG_ON(test_bit(index, ppat->used));
3015 entry->value = value;
3016 kref_init(&entry->ref);
3017 set_bit(index, ppat->used);
3018 set_bit(index, ppat->dirty);
3023 static void __free_ppat_entry(struct intel_ppat_entry *entry)
3025 struct intel_ppat *ppat = entry->ppat;
3026 unsigned int index = entry - ppat->entries;
3028 GEM_BUG_ON(index >= ppat->max_entries);
3029 GEM_BUG_ON(!test_bit(index, ppat->used));
3031 entry->value = ppat->clear_value;
3032 clear_bit(index, ppat->used);
3033 set_bit(index, ppat->dirty);
3037 * intel_ppat_get - get a usable PPAT entry
3038 * @i915: i915 device instance
3039 * @value: the PPAT value required by the caller
3041 * The function searches for an existing PPAT entry that matches the
3042 * required value. If a perfect match is found, the existing PPAT entry is
3043 * used. If the match is only partial, it checks whether a free PPAT index
3044 * is available; if so, a new PPAT index is allocated for the required
3045 * value and the HW is updated. Otherwise, the partially matching entry is
3046 * used.
3048 const struct intel_ppat_entry *
3049 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3051 struct intel_ppat *ppat = &i915->ppat;
3052 struct intel_ppat_entry *entry = NULL;
3053 unsigned int scanned, best_score;
3056 GEM_BUG_ON(!ppat->max_entries);
3058 scanned = best_score = 0;
3059 for_each_set_bit(i, ppat->used, ppat->max_entries) {
3062 score = ppat->match(ppat->entries[i].value, value);
3063 if (score > best_score) {
3064 entry = &ppat->entries[i];
3065 if (score == INTEL_PPAT_PERFECT_MATCH) {
3066 kref_get(&entry->ref);
3074 if (scanned == ppat->max_entries) {
3076 return ERR_PTR(-ENOSPC);
3078 kref_get(&entry->ref);
3082 i = find_first_zero_bit(ppat->used, ppat->max_entries);
3083 entry = __alloc_ppat_entry(ppat, i, value);
3084 ppat->update_hw(i915);
3088 static void release_ppat(struct kref *kref)
3090 struct intel_ppat_entry *entry =
3091 container_of(kref, struct intel_ppat_entry, ref);
3092 struct drm_i915_private *i915 = entry->ppat->i915;
3094 __free_ppat_entry(entry);
3095 entry->ppat->update_hw(i915);
3099 * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get()
3100 * @entry: an intel PPAT entry
3102 * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index
3103 * of the entry was dynamically allocated, its reference count is decreased.
3104 * Once the reference count drops to zero, the PPAT index becomes free again.
3106 void intel_ppat_put(const struct intel_ppat_entry *entry)
3108 struct intel_ppat *ppat = entry->ppat;
3109 unsigned int index = entry - ppat->entries;
3111 GEM_BUG_ON(!ppat->max_entries);
3113 kref_put(&ppat->entries[index].ref, release_ppat);
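/*
 * Editorial usage sketch (not from the original driver); the requested
 * value and the error handling below are illustrative only:
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... program (entry - i915->ppat.entries) as the PAT index ...
 *	intel_ppat_put(entry);
 *
 * The returned entry may only partially match the requested value when all
 * PPAT slots are already in use.
 */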
3116 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3118 struct intel_ppat *ppat = &dev_priv->ppat;
3121 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3122 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3123 clear_bit(i, ppat->dirty);
3127 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3129 struct intel_ppat *ppat = &dev_priv->ppat;
3133 for (i = 0; i < ppat->max_entries; i++)
3134 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3136 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3138 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3139 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3142 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3144 unsigned int score = 0;
3151 /* Cache attribute has to be matched. */
3152 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3157 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3160 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3163 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3164 return INTEL_PPAT_PERFECT_MATCH;
3169 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3171 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3172 INTEL_PPAT_PERFECT_MATCH : 0;
3175 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3177 ppat->max_entries = 8;
3178 ppat->update_hw = cnl_private_pat_update_hw;
3179 ppat->match = bdw_private_pat_match;
3180 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3182 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3183 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3184 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3185 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3186 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3187 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3188 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3189 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3192 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3193 * bits. When using advanced contexts each context stores its own PAT, but
3194 * writing this data shouldn't be harmful even in those cases. */
3195 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3197 ppat->max_entries = 8;
3198 ppat->update_hw = bdw_private_pat_update_hw;
3199 ppat->match = bdw_private_pat_match;
3200 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3202 if (!USES_PPGTT(ppat->i915)) {
3203 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3204 * so RTL will always use the value corresponding to
3205 * pat_sel = 000".
3206 * So let's disable cache for GGTT to avoid screen corruptions.
3207 * MOCS still can be used though.
3208 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3209 * before this patch, i.e. the same uncached + snooping access
3210 * like on gen6/7 seems to be in effect.
3211 * - So this just fixes blitter/render access. Again it looks
3212 * like it's not just uncached access, but uncached + snooping.
3213 * So we can still hold onto all our assumptions wrt cpu
3214 * clflushing on LLC machines.
3216 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3220 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3221 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3222 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3223 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3224 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3225 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3226 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3227 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3230 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3232 ppat->max_entries = 8;
3233 ppat->update_hw = bdw_private_pat_update_hw;
3234 ppat->match = chv_private_pat_match;
3235 ppat->clear_value = CHV_PPAT_SNOOP;
3238 * Map WB on BDW to snooped on CHV.
3240 * Only the snoop bit has meaning for CHV, the rest is
3241 * ignored.
3243 * The hardware will never snoop for certain types of accesses:
3244 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3245 * - PPGTT page tables
3246 * - some other special cycles
3248 * As with BDW, we also need to consider the following for GT accesses:
3249 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3250 * so RTL will always use the value corresponding to
3251 * pat_sel = 000".
3252 * Which means we must set the snoop bit in PAT entry 0
3253 * in order to keep the global status page working.
3256 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3257 __alloc_ppat_entry(ppat, 1, 0);
3258 __alloc_ppat_entry(ppat, 2, 0);
3259 __alloc_ppat_entry(ppat, 3, 0);
3260 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3261 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3262 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3263 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3266 static void gen6_gmch_remove(struct i915_address_space *vm)
3268 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3271 cleanup_scratch_page(vm);
3274 static void setup_private_pat(struct drm_i915_private *dev_priv)
3276 struct intel_ppat *ppat = &dev_priv->ppat;
3279 ppat->i915 = dev_priv;
3281 if (INTEL_GEN(dev_priv) >= 10)
3282 cnl_setup_private_ppat(ppat);
3283 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3284 chv_setup_private_ppat(ppat);
3286 bdw_setup_private_ppat(ppat);
3288 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3290 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3291 ppat->entries[i].value = ppat->clear_value;
3292 ppat->entries[i].ppat = ppat;
3293 set_bit(i, ppat->dirty);
3296 ppat->update_hw(dev_priv);
3299 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3301 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3302 struct pci_dev *pdev = dev_priv->drm.pdev;
3307 /* TODO: We're not aware of mappable constraints on gen8 yet */
3309 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3310 pci_resource_len(pdev, 2));
3311 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3313 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3315 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3317 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3319 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3320 if (IS_CHERRYVIEW(dev_priv))
3321 size = chv_get_total_gtt_size(snb_gmch_ctl);
3323 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3325 ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3326 ggtt->vm.cleanup = gen6_gmch_remove;
3327 ggtt->vm.insert_page = gen8_ggtt_insert_page;
3328 ggtt->vm.clear_range = nop_clear_range;
3329 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3330 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3332 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3334 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3335 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3336 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3337 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3338 if (ggtt->vm.clear_range != nop_clear_range)
3339 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3342 ggtt->invalidate = gen6_ggtt_invalidate;
3344 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3345 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3346 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3347 ggtt->vm.vma_ops.clear_pages = clear_pages;
3349 setup_private_pat(dev_priv);
3351 return ggtt_probe_common(ggtt, size);
3354 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3356 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3357 struct pci_dev *pdev = dev_priv->drm.pdev;
3363 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3364 pci_resource_len(pdev, 2));
3365 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3367 /* 64/512MB is the current min/max we actually know of, but this is just
3368 * a coarse sanity check.
3370 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3371 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3375 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3377 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3379 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3380 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3382 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3383 ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3385 ggtt->vm.clear_range = gen6_ggtt_clear_range;
3386 ggtt->vm.insert_page = gen6_ggtt_insert_page;
3387 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3388 ggtt->vm.cleanup = gen6_gmch_remove;
3390 ggtt->invalidate = gen6_ggtt_invalidate;
3392 if (HAS_EDRAM(dev_priv))
3393 ggtt->vm.pte_encode = iris_pte_encode;
3394 else if (IS_HASWELL(dev_priv))
3395 ggtt->vm.pte_encode = hsw_pte_encode;
3396 else if (IS_VALLEYVIEW(dev_priv))
3397 ggtt->vm.pte_encode = byt_pte_encode;
3398 else if (INTEL_GEN(dev_priv) >= 7)
3399 ggtt->vm.pte_encode = ivb_pte_encode;
3401 ggtt->vm.pte_encode = snb_pte_encode;
3403 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3404 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3405 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3406 ggtt->vm.vma_ops.clear_pages = clear_pages;
3408 return ggtt_probe_common(ggtt, size);
3411 static void i915_gmch_remove(struct i915_address_space *vm)
3413 intel_gmch_remove();
3416 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3418 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3419 phys_addr_t gmadr_base;
3422 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3424 DRM_ERROR("failed to set up gmch\n");
3428 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3431 (struct resource) DEFINE_RES_MEM(gmadr_base,
3432 ggtt->mappable_end);
3434 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3435 ggtt->vm.insert_page = i915_ggtt_insert_page;
3436 ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3437 ggtt->vm.clear_range = i915_ggtt_clear_range;
3438 ggtt->vm.cleanup = i915_gmch_remove;
3440 ggtt->invalidate = gmch_ggtt_invalidate;
3442 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3443 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3444 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3445 ggtt->vm.vma_ops.clear_pages = clear_pages;
3447 if (unlikely(ggtt->do_idle_maps))
3448 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3454 * i915_ggtt_probe_hw - Probe GGTT hardware location
3455 * @dev_priv: i915 device
3457 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3459 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3462 ggtt->vm.i915 = dev_priv;
3463 ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3465 if (INTEL_GEN(dev_priv) <= 5)
3466 ret = i915_gmch_probe(ggtt);
3467 else if (INTEL_GEN(dev_priv) < 8)
3468 ret = gen6_gmch_probe(ggtt);
3470 ret = gen8_gmch_probe(ggtt);
3474 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3475 * This is easier than doing range restriction on the fly, as we
3476 * currently don't have any bits spare to pass in this upper
3477 * restriction!
3479 if (USES_GUC(dev_priv)) {
3480 ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
3481 ggtt->mappable_end =
3482 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3485 if ((ggtt->vm.total - 1) >> 32) {
3486 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3487 " of address space! Found %lldM!\n",
3488 ggtt->vm.total >> 20);
3489 ggtt->vm.total = 1ULL << 32;
3490 ggtt->mappable_end =
3491 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3494 if (ggtt->mappable_end > ggtt->vm.total) {
3495 DRM_ERROR("mappable aperture extends past end of GGTT,"
3496 " aperture=%pa, total=%llx\n",
3497 &ggtt->mappable_end, ggtt->vm.total);
3498 ggtt->mappable_end = ggtt->vm.total;
3501 /* GMADR is the PCI mmio aperture into the global GTT. */
3502 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3503 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3504 DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3505 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3506 if (intel_vtd_active())
3507 DRM_INFO("VT-d active for gfx access\n");
3513 * i915_ggtt_init_hw - Initialize GGTT hardware
3514 * @dev_priv: i915 device
3516 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3518 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3521 INIT_LIST_HEAD(&dev_priv->vm_list);
3523 /* Note that we use page colouring to enforce a guard page at the
3524 * end of the address space. This is required as the CS may prefetch
3525 * beyond the end of the batch buffer, across the page boundary,
3526 * and beyond the end of the GTT if we do not provide a guard.
3528 mutex_lock(&dev_priv->drm.struct_mutex);
3529 i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
3530 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
3531 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3532 mutex_unlock(&dev_priv->drm.struct_mutex);
3534 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3535 dev_priv->ggtt.gmadr.start,
3536 dev_priv->ggtt.mappable_end)) {
3538 goto out_gtt_cleanup;
3541 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3544 * Initialise stolen early so that we may reserve preallocated
3545 * objects for the BIOS to KMS transition.
3547 ret = i915_gem_init_stolen(dev_priv);
3549 goto out_gtt_cleanup;
3554 ggtt->vm.cleanup(&ggtt->vm);
3558 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3560 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3566 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3568 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3570 i915->ggtt.invalidate = guc_ggtt_invalidate;
3572 i915_ggtt_invalidate(i915);
3575 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3577 /* We should only be called after i915_ggtt_enable_guc() */
3578 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3580 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3582 i915_ggtt_invalidate(i915);
3585 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3587 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3588 struct i915_vma *vma, *vn;
3590 i915_check_and_clear_faults(dev_priv);
3592 /* First fill our portion of the GTT with scratch pages */
3593 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3595 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3597 /* clflush objects bound into the GGTT and rebind them. */
3598 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
3599 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
3600 struct drm_i915_gem_object *obj = vma->obj;
3602 if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3605 if (!i915_vma_unbind(vma))
3608 WARN_ON(i915_vma_bind(vma,
3609 obj ? obj->cache_level : 0,
3612 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3615 ggtt->vm.closed = false;
3616 i915_ggtt_invalidate(dev_priv);
3618 if (INTEL_GEN(dev_priv) >= 8) {
3619 struct intel_ppat *ppat = &dev_priv->ppat;
3621 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3622 dev_priv->ppat.update_hw(dev_priv);
3627 static struct scatterlist *
3628 rotate_pages(const dma_addr_t *in, unsigned int offset,
3629 unsigned int width, unsigned int height,
3630 unsigned int stride,
3631 struct sg_table *st, struct scatterlist *sg)
3633 unsigned int column, row;
3634 unsigned int src_idx;
3636 for (column = 0; column < width; column++) {
3637 src_idx = stride * (height - 1) + column;
3638 for (row = 0; row < height; row++) {
3640 /* We don't need the pages, but need to initialize
3641 * the entries so the sg list can be happily traversed.
3642 * The only things we need are the DMA addresses.
3644 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3645 sg_dma_address(sg) = in[offset + src_idx];
3646 sg_dma_len(sg) = PAGE_SIZE;
3655 static noinline struct sg_table *
3656 intel_rotate_pages(struct intel_rotation_info *rot_info,
3657 struct drm_i915_gem_object *obj)
3659 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
3660 unsigned int size = intel_rotation_info_size(rot_info);
3661 struct sgt_iter sgt_iter;
3662 dma_addr_t dma_addr;
3664 dma_addr_t *page_addr_list;
3665 struct sg_table *st;
3666 struct scatterlist *sg;
3669 /* Allocate a temporary list of source pages for random access. */
3670 page_addr_list = kvmalloc_array(n_pages,
3673 if (!page_addr_list)
3674 return ERR_PTR(ret);
3676 /* Allocate target SG list. */
3677 st = kmalloc(sizeof(*st), GFP_KERNEL);
3681 ret = sg_alloc_table(st, size, GFP_KERNEL);
3685 /* Populate source page list from the object. */
3687 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3688 page_addr_list[i++] = dma_addr;
3690 GEM_BUG_ON(i != n_pages);
3694 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3695 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3696 rot_info->plane[i].width, rot_info->plane[i].height,
3697 rot_info->plane[i].stride, st, sg);
3700 kvfree(page_addr_list);
3707 kvfree(page_addr_list);
3709 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3710 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3712 return ERR_PTR(ret);
3715 static noinline struct sg_table *
3716 intel_partial_pages(const struct i915_ggtt_view *view,
3717 struct drm_i915_gem_object *obj)
3719 struct sg_table *st;
3720 struct scatterlist *sg, *iter;
3721 unsigned int count = view->partial.size;
3722 unsigned int offset;
3725 st = kmalloc(sizeof(*st), GFP_KERNEL);
3729 ret = sg_alloc_table(st, count, GFP_KERNEL);
3733 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3741 len = min(iter->length - (offset << PAGE_SHIFT),
3742 count << PAGE_SHIFT);
3743 sg_set_page(sg, NULL, len, 0);
3744 sg_dma_address(sg) =
3745 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3746 sg_dma_len(sg) = len;
3749 count -= len >> PAGE_SHIFT;
3756 iter = __sg_next(iter);
3763 return ERR_PTR(ret);
3767 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3771 /* The vma->pages are only valid within the lifespan of the borrowed
3772 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3773 * must be the vma->pages. A simple rule is that vma->pages must only
3774 * be accessed when the obj->mm.pages are pinned.
3776 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3778 switch (vma->ggtt_view.type) {
3780 GEM_BUG_ON(vma->ggtt_view.type);
3782 case I915_GGTT_VIEW_NORMAL:
3783 vma->pages = vma->obj->mm.pages;
3786 case I915_GGTT_VIEW_ROTATED:
3788 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3791 case I915_GGTT_VIEW_PARTIAL:
3792 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3797 if (unlikely(IS_ERR(vma->pages))) {
3798 ret = PTR_ERR(vma->pages);
3800 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3801 vma->ggtt_view.type, ret);
3807 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3808 * @vm: the &struct i915_address_space
3809 * @node: the &struct drm_mm_node (typically i915_vma.node)
3810 * @size: how much space to allocate inside the GTT,
3811 * must be #I915_GTT_PAGE_SIZE aligned
3812 * @offset: where to insert inside the GTT,
3813 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3814 * (@offset + @size) must fit within the address space
3815 * @color: color to apply to node, if this node is not from a VMA,
3816 * color must be #I915_COLOR_UNEVICTABLE
3817 * @flags: control search and eviction behaviour
3819 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3820 * the address space (using @size and @color). If the @node does not fit, it
3821 * tries to evict any overlapping nodes from the GTT, including any
3822 * neighbouring nodes if the colors do not match (to ensure guard pages between
3823 * differing domains). See i915_gem_evict_for_node() for the gory details
3824 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3825 * evicting active overlapping objects, and any overlapping node that is pinned
3826 * or marked as unevictable will also result in failure.
3828 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3829 * asked to wait for eviction and interrupted.
3831 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3832 struct drm_mm_node *node,
3833 u64 size, u64 offset, unsigned long color,
3839 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3840 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3841 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3842 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3843 GEM_BUG_ON(drm_mm_node_allocated(node));
3846 node->start = offset;
3847 node->color = color;
3849 err = drm_mm_reserve_node(&vm->mm, node);
3853 if (flags & PIN_NOEVICT)
3856 err = i915_gem_evict_for_node(vm, node, flags);
3858 err = drm_mm_reserve_node(&vm->mm, node);
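/*
 * Editorial usage sketch (not from the original driver); the node, size and
 * offset below are hypothetical:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *	...
 *	drm_mm_remove_node(&node);
 */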
3863 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3867 GEM_BUG_ON(range_overflows(start, len, end));
3868 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3870 range = round_down(end - len, align) - round_up(start, align);
3872 if (sizeof(unsigned long) == sizeof(u64)) {
3873 addr = get_random_long();
3875 addr = get_random_int();
3876 if (range > U32_MAX) {
3877 addr <<= 32;
3878 addr |= get_random_int();
3881 div64_u64_rem(addr, range, &addr);
3885 return round_up(start, align);
3889 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3890 * @vm: the &struct i915_address_space
3891 * @node: the &struct drm_mm_node (typically i915_vma.node)
3892 * @size: how much space to allocate inside the GTT,
3893 * must be #I915_GTT_PAGE_SIZE aligned
3894 * @alignment: required alignment of starting offset, may be 0 but
3895 * if specified, this must be a power-of-two and at least
3896 * #I915_GTT_MIN_ALIGNMENT
3897 * @color: color to apply to node
3898 * @start: start of any range restriction inside GTT (0 for all),
3899 * must be #I915_GTT_PAGE_SIZE aligned
3900 * @end: end of any range restriction inside GTT (U64_MAX for all),
3901 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3902 * @flags: control search and eviction behaviour
3904 * i915_gem_gtt_insert() first searches for an available hole into which
3905 * it can insert the node. The hole address is aligned to @alignment and
3906 * its @size must then fit entirely within the [@start, @end] bounds. The
3907 * nodes on either side of the hole must match @color, or else a guard page
3908 * will be inserted between the two nodes (or the node evicted). If no
3909 * suitable hole is found, first a victim is randomly selected and tested
3910 * for eviction, and failing that the LRU list of objects within the GTT
3911 * is scanned to find the first set of replacement nodes to create the hole.
3912 * Those old overlapping nodes are evicted from the GTT (and so must be
3913 * rebound before any future use). Any node that is currently pinned cannot
3914 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3915 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3916 * searching for an eviction candidate. See i915_gem_evict_something() for
3917 * the gory details on the eviction algorithm.
3919 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3920 * asked to wait for eviction and interrupted.
3922 int i915_gem_gtt_insert(struct i915_address_space *vm,
3923 struct drm_mm_node *node,
3924 u64 size, u64 alignment, unsigned long color,
3925 u64 start, u64 end, unsigned int flags)
3927 enum drm_mm_insert_mode mode;
3931 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3933 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3934 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3935 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3936 GEM_BUG_ON(start >= end);
3937 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3938 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3939 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3940 GEM_BUG_ON(drm_mm_node_allocated(node));
3942 if (unlikely(range_overflows(start, size, end)))
3945 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3948 mode = DRM_MM_INSERT_BEST;
3949 if (flags & PIN_HIGH)
3950 mode = DRM_MM_INSERT_HIGHEST;
3951 if (flags & PIN_MAPPABLE)
3952 mode = DRM_MM_INSERT_LOW;
3954 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3955 * so we know that we always have a minimum alignment of 4096.
3956 * The drm_mm range manager is optimised to return results
3957 * with zero alignment, so where possible use the optimal
3960 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3961 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3964 err = drm_mm_insert_node_in_range(&vm->mm, node,
3965 size, alignment, color,
3970 if (mode & DRM_MM_INSERT_ONCE) {
3971 err = drm_mm_insert_node_in_range(&vm->mm, node,
3972 size, alignment, color,
3974 DRM_MM_INSERT_BEST);
3979 if (flags & PIN_NOEVICT)
3982 /* No free space, pick a slot at random.
3984 * There is a pathological case here using a GTT shared between
3985 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3987 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3988 * (64k objects) (448k objects)
3990 * Now imagine that the eviction LRU is ordered top-down (just because
3991 * pathology meets real life), and that we need to evict an object to
3992 * make room inside the aperture. The eviction scan then has to walk
3993 * the 448k list before it finds one within range. And now imagine that
3994 * it has to search for a new hole between every byte inside the memcpy,
3995 * for several simultaneous clients.
3997 * On a full-ppgtt system, if we have run out of available space, there
3998 * will be lots and lots of objects in the eviction list! Again,
3999 * searching that LRU list may be slow if we are also applying any
4000 * range restrictions (e.g. restriction to low 4GiB) and so, for
4001 * simplicity and similarity between the different GTTs, try the single
4002 * random replacement first.
4004 offset = random_offset(start, end,
4005 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4006 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4010 /* Randomly selected placement is pinned, do a search */
4011 err = i915_gem_evict_something(vm, size, alignment, color,
4016 return drm_mm_insert_node_in_range(&vm->mm, node,
4017 size, alignment, color,
4018 start, end, DRM_MM_INSERT_EVICT);
4021 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4022 #include "selftests/mock_gtt.c"
4023 #include "selftests/i915_gem_gtt.c"