// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
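
/*
 * On Broxton the GGTT update path must be serialized when VT-d is active
 * (the workaround uses stop_machine(); see the lockdep annotation in
 * i915_address_space_init()), so concurrent access to the VM is not safe.
 */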
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
	return IS_BROXTON(i915) && i915_vtd_active(i915);
}

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
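
/* Allocate a page-table backing object from device local memory (lmem). */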
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should be
	 * totally fine, since GTT paging structures are not typically inserted
	 * into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since we
	 * used the passed in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}
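
/*
 * Map a paging-structure object into the CPU address space, taking the
 * object lock internally, and mark it unshrinkable: page tables must not
 * be reclaimed behind the GPU's back.
 */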
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}
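
/* As map_pt_dma(), but the caller must already hold the object lock. */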
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}
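
/*
 * Drain a vm list (bound or unbound) during teardown, unbinding dying vmas
 * in place and destroying live ones. Caller holds vm->mutex.
 */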
static void clear_vm_list(struct list_head *list)
{
	struct i915_vma *vma, *vn;

	list_for_each_entry_safe(vma, vn, list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (!i915_gem_object_get_rcu(obj)) {
			/*
			 * Object is dying, but has not yet cleared its
			 * vma list.
			 * Unbind the dying vma to ensure our list
			 * is completely drained. We leave the destruction to
			 * the object destructor to avoid the vma
			 * disappearing under it.
			 */
			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
			WARN_ON(__i915_vma_unbind(vma));

			/* Remove from the unbound list */
			list_del_init(&vma->vm_link);

			/*
			 * Delay the vm and vm mutex freeing until the
			 * object is done with destruction.
			 */
			i915_vm_resv_get(vma->vm);
			vma->vm_ddestroy = true;
		} else {
			i915_vma_destroy_locked(vma);
			i915_gem_object_put(obj);
		}
	}
}

static void __i915_vm_close(struct i915_address_space *vm)
{
	mutex_lock(&vm->mutex);

	clear_vm_list(&vm->bound_list);
	clear_vm_list(&vm->unbound_list);

	/* Check for must-fix unanticipated side-effects */
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	GEM_BUG_ON(!list_empty(&vm->unbound_list));

	mutex_unlock(&vm->mutex);
}

/* lock the vm into the current ww; if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from ggtt, take the top level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock, which may also happen from vma
 * destruction if it raced with the vm teardown.
 */
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	mutex_destroy(&vm->mutex);

	kfree(vm);
}

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	__i915_vm_close(vm);

	/* Synchronize async unbinds. */
	i915_vma_resource_bind_dep_sync_all(vm);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}
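
/*
 * Common initialisation for GGTT and PPGTT address spaces: reference
 * counts, the reclaim-safe vm->mutex, the shared dma-resv, the drm_mm
 * range manager spanning [0, vm->total), and per-region minimum
 * alignments for 64K/compact page-table platforms.
 */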
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	vm->pending_unbind = RB_ROOT_CACHED;
	INIT_WORK(&vm->release_work, __i915_vm_release);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV + BXT VT-d workarounds use stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory inside
		 * it through stop_machine().
		 *
		 * Add the annotation for this; we use trylock in the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);

	memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
		 ARRAY_SIZE(vm->min_alignment));

	if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
	    subclass == VM_CLASS_PPGTT) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
	} else if (HAS_64K_PAGES(vm->i915)) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
	}

	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
	INIT_LIST_HEAD(&vm->unbound_list);
}
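
/*
 * The __px_*() helpers look up the CPU mapping, DMA address and struct
 * page of a paging-structure object; all of them require the object's
 * backing pages to already be present.
 */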
void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
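
/*
 * Scratch contents: POISON_FREE on CONFIG_DRM_I915_DEBUG_GEM builds so
 * stray accesses stand out (and hang the GPU if executed), zero otherwise;
 * see the rationale in setup_scratch_page().
 */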
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
	drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64K scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_scratch_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		/*
		 * If we need 64K minimum GTT pages for device local-memory,
		 * like on XEHPSDV, then we need to fail the allocation here,
		 * otherwise we can't safely support the insertion of
		 * local-memory pages for this vm, since the HW expects the
		 * correct physical alignment and size when the page-table is
		 * operating in 64K GTT mode, which includes any scratch PTEs,
		 * since userspace can still touch them.
		 */
		if (HAS_64K_PAGES(vm->i915))
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}
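
/* Release the scratch page(s), one per page-table level up to vm->top. */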
void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for GTT-related workarounds. It is called on driver
	 * load and after a GPU reset, so you can place workarounds here even
	 * if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}
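
/*
 * Private PAT setup. Each PAT index programs the cache attributes
 * (WB/WC/WT/UC, plus LLC/eLLC targeting and age on gen8-11) that PTEs
 * select via their pat_sel bits.
 */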
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}
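
/*
 * Create a CPU-cacheable internal object and return an (unpinned) vma for
 * it in @vm, for reading back GPU-written data.
 */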
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}
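
/* As above, but also pin the vma: PIN_GLOBAL in the GGTT, PIN_USER otherwise. */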
struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif