// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
        return IS_BROXTON(i915) && i915_vtd_active(i915);
}

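/*
 * On Cherryview, and on Broxton when VT-d is active, GTT updates must not
 * race with concurrent GPU access; callers use this predicate to decide
 * whether such updates need to be serialised (see the stop_machine() note
 * in i915_address_space_init()).
 */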
bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
        return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

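/*
 * Allocate the backing store for a paging structure from device local
 * memory (lmem).
 */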
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
        struct drm_i915_gem_object *obj;

        /*
         * To avoid severe over-allocation when dealing with min_page_size
         * restrictions, we override that behaviour here by allowing an object
         * size and page layout which can be smaller. In practice this should be
         * totally fine, since GTT paging structures are not typically inserted
         * into the GTT.
         *
         * Note that we also hit this path for the scratch page, and for this
         * case it might need to be 64K, but that should work fine here since we
         * use the passed-in size for the page size, which should ensure it
         * also has the same alignment.
         */
        obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
                                                    vm->lmem_pt_obj_flags);
        /*
         * Ensure all paging structures for this vm share the same dma-resv
         * object underneath, with the idea that one object_lock() will lock
         * them all at once.
         */
        if (!IS_ERR(obj)) {
                obj->base.resv = i915_vm_resv_get(vm);
                obj->shares_resv_from = vm;
        }

        return obj;
}

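/*
 * Allocate the backing store for a paging structure from system memory.
 * Under selftests, fault injection may force a full shrink before the
 * allocation is attempted.
 */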
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
        struct drm_i915_gem_object *obj;

        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);

        obj = i915_gem_object_create_internal(vm->i915, sz);
        /*
         * Ensure all paging structures for this vm share the same dma-resv
         * object underneath, with the idea that one object_lock() will lock
         * them all at once.
         */
        if (!IS_ERR(obj)) {
                obj->base.resv = i915_vm_resv_get(vm);
                obj->shares_resv_from = vm;
        }

        return obj;
}

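/*
 * Pin a CPU mapping of the paging structure's backing object, taking the
 * object lock internally, and mark it unshrinkable while it is mapped.
 */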
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
        enum i915_map_type type;
        void *vaddr;

        type = i915_coherent_map_type(vm->i915, obj, true);
        vaddr = i915_gem_object_pin_map_unlocked(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        i915_gem_object_make_unshrinkable(obj);
        return 0;
}

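/* As map_pt_dma(), but for callers already holding the object lock. */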
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
        enum i915_map_type type;
        void *vaddr;

        type = i915_coherent_map_type(vm->i915, obj, true);
        vaddr = i915_gem_object_pin_map(obj, type);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        i915_gem_object_make_unshrinkable(obj);
        return 0;
}

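/*
 * Unbind and remove every vma on the list, coping with objects that are
 * already dying and can no longer be referenced.
 */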
static void clear_vm_list(struct list_head *list)
{
        struct i915_vma *vma, *vn;

        list_for_each_entry_safe(vma, vn, list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;

                if (!i915_gem_object_get_rcu(obj)) {
                        /*
                         * Object is dying, but has not yet cleared its
                         * vma list.
                         * Unbind the dying vma to ensure our list
                         * is completely drained. We leave the destruction to
                         * the object destructor to avoid the vma
                         * disappearing under it.
                         */
                        atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
                        WARN_ON(__i915_vma_unbind(vma));

                        /* Remove from the unbound list */
                        list_del_init(&vma->vm_link);

                        /*
                         * Delay the vm and vm mutex freeing until the
                         * object is done with destruction.
                         */
                        i915_vm_resv_get(vma->vm);
                        vma->vm_ddestroy = true;
                } else {
                        i915_vma_destroy_locked(vma);
                        i915_gem_object_put(obj);
                }
        }
}

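/* Drain the bound and unbound vma lists ahead of tearing down the vm. */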
static void __i915_vm_close(struct i915_address_space *vm)
{
        mutex_lock(&vm->mutex);

        clear_vm_list(&vm->bound_list);
        clear_vm_list(&vm->unbound_list);

        /* Check for must-fix unanticipated side-effects */
        GEM_BUG_ON(!list_empty(&vm->bound_list));
        GEM_BUG_ON(!list_empty(&vm->unbound_list));

        mutex_unlock(&vm->mutex);
}

/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
                         struct i915_gem_ww_ctx *ww)
{
        if (vm->scratch[0]->base.resv == &vm->_resv) {
                return i915_gem_object_lock(vm->scratch[0], ww);
        } else {
                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

                /* We borrowed the scratch page from ggtt, take the top level object */
                return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
        }
}

void i915_address_space_fini(struct i915_address_space *vm)
{
        drm_mm_takedown(&vm->mm);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock, and also if we raced with vma destruction
 * while destroying the vm.
 */
void i915_vm_resv_release(struct kref *kref)
{
        struct i915_address_space *vm =
                container_of(kref, typeof(*vm), resv_ref);

        dma_resv_fini(&vm->_resv);
        mutex_destroy(&vm->mutex);

        kfree(vm);
}

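/*
 * Deferred (workqueue) part of i915_vm_release(): close the vm, wait for
 * outstanding async unbinds, run the backend cleanup, and drop our
 * reference on the shared resv (which frees the vm once the last sharer
 * is gone).
 */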
static void __i915_vm_release(struct work_struct *work)
{
        struct i915_address_space *vm =
                container_of(work, struct i915_address_space, release_work);

        __i915_vm_close(vm);

        /* Synchronize async unbinds. */
        i915_vma_resource_bind_dep_sync_all(vm);

        vm->cleanup(vm);
        i915_address_space_fini(vm);

        i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
        struct i915_address_space *vm =
                container_of(kref, struct i915_address_space, ref);

        GEM_BUG_ON(i915_is_ggtt(vm));
        trace_i915_ppgtt_release(vm);

        queue_work(vm->i915->wq, &vm->release_work);
}

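/*
 * Common initialisation shared by the GGTT and ppGTT: reference counts,
 * locking, the drm_mm range manager, minimum alignments and the vma lists.
 */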
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
        kref_init(&vm->ref);

        /*
         * Special case for GGTT that has already done an early
         * kref_init here.
         */
        if (!kref_read(&vm->resv_ref))
                kref_init(&vm->resv_ref);

        vm->pending_unbind = RB_ROOT_CACHED;
        INIT_WORK(&vm->release_work, __i915_vm_release);

        /*
         * The vm->mutex must be reclaim safe (for use in the shrinker).
         * Do a dummy acquire now under fs_reclaim so that any allocation
         * attempt holding the lock is immediately reported by lockdep.
         */
        mutex_init(&vm->mutex);
        lockdep_set_subclass(&vm->mutex, subclass);

        if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
                i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
        } else {
                /*
                 * CHV + BXT VTD workarounds use stop_machine(),
                 * which is allowed to allocate memory. This means &vm->mutex
                 * is the outer lock, and in theory we can allocate memory inside
                 * it through stop_machine().
                 *
                 * Add the annotation for this; we use trylock in the shrinker.
                 */
                mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
                might_alloc(GFP_KERNEL);
                mutex_release(&vm->mutex.dep_map, _THIS_IP_);
        }
        dma_resv_init(&vm->_resv);

        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);

        memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
                 ARRAY_SIZE(vm->min_alignment));

        if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
            subclass == VM_CLASS_PPGTT) {
                vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
                vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
        } else if (HAS_64K_PAGES(vm->i915)) {
                vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
                vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
        }

        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

        INIT_LIST_HEAD(&vm->bound_list);
        INIT_LIST_HEAD(&vm->unbound_list);
}

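/*
 * The __px_*() helpers below return the CPU address, DMA address or
 * struct page of a paging-structure object; the backing pages must
 * already be present.
 */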
void *__px_vaddr(struct drm_i915_gem_object *p)
{
        enum i915_map_type type;

        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(p));
        return sg_page(p->mm.pages->sgl);
}

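/*
 * Fill a paging structure with 'count' copies of 'val', flushing the CPU
 * cache so that non-coherent readers observe the update.
 */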
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
        void *vaddr = __px_vaddr(p);

        memset64(vaddr, val, count);
        drm_clflush_virt_range(vaddr, PAGE_SIZE);
}

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
        void *vaddr = __px_vaddr(scratch);
        u8 val;

        val = 0;
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                val = POISON_FREE;

        memset(vaddr, val, scratch->base.size);
        drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
        unsigned long size;

        /*
         * In order to utilize 64K pages for an object with a size < 2M, we will
         * need to support a 64K scratch page, given that every 16th entry for a
         * page-table operating in 64K mode must point to a properly aligned 64K
         * region, including any PTEs which happen to point to scratch.
         *
         * This is only relevant for the 48b PPGTT where we support
         * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
         * scratch (read-only) between all vm, we create one 64k scratch page
         * for all.
         */
        size = I915_GTT_PAGE_SIZE_4K;
        if (i915_vm_is_4lvl(vm) &&
            HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
                size = I915_GTT_PAGE_SIZE_64K;

        do {
                struct drm_i915_gem_object *obj;

                obj = vm->alloc_scratch_dma(vm, size);
                if (IS_ERR(obj))
                        goto skip;

                if (map_pt_dma(vm, obj))
                        goto skip_obj;

                /* We need a single contiguous page for our scratch */
                if (obj->mm.page_sizes.sg < size)
                        goto skip_obj;

                /* And it needs to be correspondingly aligned */
                if (__px_dma(obj) & (size - 1))
                        goto skip_obj;

                /*
                 * Use a non-zero scratch page for debugging.
                 *
                 * We want a value that should be reasonably obvious
                 * to spot in the error state, while also causing a GPU hang
                 * if executed. We prefer using a clear page in production, so
                 * should it ever be accidentally used, the effect should be
                 * fairly benign.
                 */
                poison_scratch_page(obj);

                vm->scratch[0] = obj;
                vm->scratch_order = get_order(size);
                return 0;

skip_obj:
                i915_gem_object_put(obj);
skip:
                if (size == I915_GTT_PAGE_SIZE_4K)
                        return -ENOMEM;

                /*
                 * If we need 64K minimum GTT pages for device local-memory,
                 * like on XEHPSDV, then we need to fail the allocation here,
                 * otherwise we can't safely support the insertion of
                 * local-memory pages for this vm, since the HW expects the
                 * correct physical alignment and size when the page-table is
                 * operating in 64K GTT mode. This includes any scratch PTEs,
                 * since userspace can still touch them.
                 */
                if (HAS_64K_PAGES(vm->i915))
                        return -ENOMEM;

                size = I915_GTT_PAGE_SIZE_4K;
        } while (1);
}

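/*
 * Drop the references to the scratch page (and, for ppGTT, the scratch
 * paging structures) held in vm->scratch[0..vm->top].
 */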
void free_scratch(struct i915_address_space *vm)
{
        int i;

        for (i = 0; i <= vm->top; i++)
                i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;

        /*
         * This function is for GTT-related workarounds. It is called on
         * driver load and after a GPU reset, so workarounds can be placed
         * here even if they get overwritten by a GPU reset.
         */
        /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
        if (IS_BROADWELL(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
        else if (IS_CHERRYVIEW(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
        else if (IS_GEN9_LP(i915))
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
        else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
                intel_uncore_write(uncore,
                                   GEN8_L3_LRA_1_GPGPU,
                                   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

        /*
         * To support 64K PTEs we need to first enable the use of the
         * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
         * mmio, otherwise the page-walker will simply ignore the IPS bit. This
         * shouldn't be needed after GEN10.
         *
         * 64K pages were first introduced from BDW+, although technically they
         * only *work* from gen9+. For pre-BDW we instead have the option for
         * 32K pages, but we don't currently have any support for it in our
         * driver.
         */
        if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
            GRAPHICS_VER(i915) <= 10)
                intel_uncore_rmw(uncore,
                                 GEN8_GAMW_ECO_DEV_RW_IA,
                                 0,
                                 GAMW_ECO_ENABLE_64K_IPS_FIELD);

        if (IS_GRAPHICS_VER(i915, 8, 11)) {
                bool can_use_gtt_cache = true;

                /*
                 * According to the BSpec, if we use 2M/1G pages then we also
                 * need to disable the GTT cache. At least on BDW we can see
                 * visual corruption when using 2M pages and not disabling the
                 * GTT cache.
                 */
                if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
                        can_use_gtt_cache = false;

                /* WaGttCachingOffByDefault */
                intel_uncore_write(uncore,
                                   HSW_GTT_CACHE_EN,
                                   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
                drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
                                 intel_uncore_read(uncore,
                                                   HSW_GTT_CACHE_EN) == 0);
        }
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
        /* TGL doesn't support LLC or AGE settings */
        intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
        intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(0),
                           GEN8_PPAT_WB | GEN8_PPAT_LLC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(1),
                           GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(2),
                           GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(3),
                           GEN8_PPAT_UC);
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(4),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(5),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(6),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(7),
                           GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;
        u64 pat;

        pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |      /* for normal objects, no eLLC */
              GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |  /* for something pointing to ptes? */
              GEN8_PPAT(3, GEN8_PPAT_UC) |                      /* Uncached objects, mostly for scanout */
              GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
              GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

        /* for scanout with eLLC */
        if (GRAPHICS_VER(i915) >= 9)
                pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
        else
                pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
        u64 pat;

        /*
         * Map WB on BDW to snooped on CHV.
         *
         * Only the snoop bit has meaning for CHV, the rest is
         * ignored.
         *
         * The hardware will never snoop for certain types of accesses:
         * - CPU GTT (GMADR->GGTT->no snoop->memory)
         * - PPGTT page tables
         * - some other special cycles
         *
         * As with BDW, we also need to consider the following for GT accesses:
         * "For GGTT, there is NO pat_sel[2:0] from the entry,
         * so RTL will always use the value corresponding to
         * pat_sel = 000".
         * Which means we must set the snoop bit in PAT entry 0
         * in order to keep the global status page working.
         */

        pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
              GEN8_PPAT(1, 0) |
              GEN8_PPAT(2, 0) |
              GEN8_PPAT(3, 0) |
              GEN8_PPAT(4, CHV_PPAT_SNOOP) |
              GEN8_PPAT(5, CHV_PPAT_SNOOP) |
              GEN8_PPAT(6, CHV_PPAT_SNOOP) |
              GEN8_PPAT(7, CHV_PPAT_SNOOP);

        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
        intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

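/*
 * Program the private PAT for the platform: per-index PAT registers on
 * gen11 and gen12+, or the packed GEN8_PRIVATE_PAT_LO/HI pair (with the
 * snoop-based layout on CHV/BXT) for earlier gens.
 */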
void setup_private_pat(struct intel_uncore *uncore)
{
        struct drm_i915_private *i915 = uncore->i915;

        GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

        if (GRAPHICS_VER(i915) >= 12)
                tgl_setup_private_ppat(uncore);
        else if (GRAPHICS_VER(i915) >= 11)
                icl_setup_private_ppat(uncore);
        else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
                chv_setup_private_ppat(uncore);
        else
                bdw_setup_private_ppat(uncore);
}

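/*
 * Create a CPU-cacheable internal object of at least 'size' bytes and wrap
 * it in a vma for the given vm; as the name suggests, this is intended for
 * reading back data that the GPU writes into the buffer.
 */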
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;

        obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                i915_gem_object_put(obj);
                return vma;
        }

        return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
        struct i915_vma *vma;
        int err;

        vma = __vm_create_scratch_for_read(vm, size);
        if (IS_ERR(vma))
                return vma;

        err = i915_vma_pin(vma, 0, 0,
                           i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
        if (err) {
                i915_vma_put(vma);
                return ERR_PTR(err);
        }

        return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif