/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&i915->mm.stolen))
                return -ENODEV;

        /* WaSkipStolenMemoryFirstPage:bdw+ */
        if (GRAPHICS_VER(i915) >= 8 && start < 4096)
                start = 4096;

        mutex_lock(&i915->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
                                          size, alignment, 0,
                                          start, end, DRM_MM_INSERT_BEST);
        mutex_unlock(&i915->mm.stolen_lock);

        return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        return i915_gem_stolen_insert_node_in_range(i915, node,
                                                    size, alignment,
                                                    I915_GEM_STOLEN_BIAS,
                                                    U64_MAX);
}
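
/*
 * Illustrative usage (a sketch, not code from this driver): a caller that
 * wants a chunk of stolen memory embeds a drm_mm_node, asks the allocator
 * for a range, and later returns it. The node and error handling below are
 * hypothetical.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_4K, 4096);
 *	if (err)
 *		return err;
 *
 *	... use [node.start, node.start + node.size) as an offset into DSM ...
 *
 *	i915_gem_stolen_remove_node(i915, &node);
 */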

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
                                 struct drm_mm_node *node)
{
        mutex_lock(&i915->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&i915->mm.stolen_lock);
}

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
        return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
                         struct resource *dsm)
{
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;

        if (!valid_stolen_size(i915, dsm))
                return -EINVAL;

        /*
         * Make sure we don't clobber the GTT if it's within stolen memory
         *
         * TODO: We have yet to encounter the case where the GTT wasn't at the
         * end of stolen. With that assumption we could simplify this.
         */
        if (GRAPHICS_VER(i915) <= 4 &&
            !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
                struct resource stolen[2] = {*dsm, *dsm};
                struct resource ggtt_res;
                resource_size_t ggtt_start;

                ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
                if (GRAPHICS_VER(i915) == 4)
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;

                ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

                if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
                        stolen[0].end = ggtt_res.start;
                if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
                        stolen[1].start = ggtt_res.end;

                /* Pick the larger of the two chunks */
                if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
                        *dsm = stolen[0];
                else
                        *dsm = stolen[1];

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        drm_dbg(&i915->drm,
                                "GTT within stolen memory at %pR\n",
                                &ggtt_res);
                        drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
                                dsm);
                }
        }

        if (!valid_stolen_size(i915, dsm))
                return -EINVAL;

        return 0;
}
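
/*
 * A worked example of the gen4 PGTBL_CTL decode above, assuming the mask
 * layout from i915_reg.h (ADDR_LO in bits 31:12, ADDR_HI in bits 7:4,
 * supplying bits 35:32 of the GGTT base). The register value is made up:
 *
 *	reg_val    = 0x3ffff0b1
 *	lo         = reg_val & PGTBL_ADDRESS_LO_MASK         = 0x3ffff000
 *	hi         = (reg_val & PGTBL_ADDRESS_HI_MASK) << 28 = 0xb00000000
 *	ggtt_start = lo | hi                                 = 0xb3ffff000
 *
 * i.e. gen4 can place the GGTT above 4GiB, which is why the value is kept
 * in a resource_size_t rather than a u32.
 */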

static int request_smem_stolen(struct drm_i915_private *i915,
                               struct resource *dsm)
{
        struct resource *r;

        /*
         * With stolen lmem, we don't need to request system memory for the
         * address range since it's local to the gpu.
         *
         * Starting with MTL, on integrated devices the stolen memory is
         * exposed via LMEMBAR and should be treated like stolen lmem.
         */
        if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
                return 0;

        /*
         * Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(i915->drm.dev, dsm->start,
                                    resource_size(dsm),
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt, but this time requesting the region from
                 * start + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: some BIOSes wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 * There are also BIOSes with an off-by-one on the other end.
                 */
                r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
                                            resource_size(dsm) - 2,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (!r && GRAPHICS_VER(i915) != 3) {
                        drm_err(&i915->drm,
                                "conflict detected with stolen region: %pR\n",
                                dsm);

                        return -EBUSY;
                }
        }

        return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
        if (!drm_mm_initialized(&i915->mm.stolen))
                return;

        drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore,
                                        IS_GM45(i915) ?
                                        CTG_STOLEN_RESERVED :
                                        ELK_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.stolen.end + 1;

        drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
                IS_GM45(i915) ? "CTG" : "ELK", reg_val);

        if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
                return;

        /*
         * Whether ILK really reuses the ELK register for this is unclear.
         * Let's see if we catch anyone with this supposedly enabled on ILK.
         */
        drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
                 "ILK stolen reserved found? 0x%08x\n",
                 reg_val);

        if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
                return;

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
        drm_WARN_ON(&i915->drm,
                    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
                                     struct intel_uncore *uncore,
                                     resource_size_t *base,
                                     resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.stolen.end + 1;

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        default:
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
                fallthrough;
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        }

        /*
         * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
         * reserved location as (top - size).
         */
        *base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
                                     struct intel_uncore *uncore,
                                     resource_size_t *base,
                                     resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
        resource_size_t stolen_top = i915->dsm.stolen.end + 1;

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

        if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
                return;

        if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
                return;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
        *size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
                                    struct intel_uncore *uncore,
                                    resource_size_t *base,
                                    resource_size_t *size)
{
        u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

        drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }

        if (HAS_LMEMBAR_SMEM_STOLEN(i915))
                /* The base is initialized to stolen top; subtract size to get the base */
                *base -= *size;
        else
                *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also referred to as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
        struct intel_uncore *uncore = &i915->uncore;
        resource_size_t reserved_base, stolen_top;
        resource_size_t reserved_size;
        int ret = 0;

        stolen_top = i915->dsm.stolen.end + 1;
        reserved_base = stolen_top;
        reserved_size = 0;

        if (GRAPHICS_VER(i915) >= 11) {
                icl_get_stolen_reserved(i915, uncore,
                                        &reserved_base, &reserved_size);
        } else if (GRAPHICS_VER(i915) >= 8) {
                if (IS_LP(i915))
                        chv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                else
                        bdw_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
        } else if (GRAPHICS_VER(i915) >= 7) {
                if (IS_VALLEYVIEW(i915))
                        vlv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                else
                        gen7_get_stolen_reserved(i915, uncore,
                                                 &reserved_base, &reserved_size);
        } else if (GRAPHICS_VER(i915) >= 6) {
                gen6_get_stolen_reserved(i915, uncore,
                                         &reserved_base, &reserved_size);
        } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
                g4x_get_stolen_reserved(i915, uncore,
                                        &reserved_base, &reserved_size);
        }

        /* No reserved stolen */
        if (reserved_base == stolen_top)
                goto bail_out;

        if (!reserved_base) {
                drm_err(&i915->drm,
                        "inconsistent reservation %pa + %pa; ignoring\n",
                        &reserved_base, &reserved_size);
                ret = -EINVAL;
                goto bail_out;
        }

        i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

        if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
                drm_err(&i915->drm,
                        "Stolen reserved area %pR outside stolen memory %pR\n",
                        &i915->dsm.reserved, &i915->dsm.stolen);
                ret = -EINVAL;
                goto bail_out;
        }

        return 0;

bail_out:
        i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

        return ret;
}
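
/*
 * An illustrative layout (made-up numbers, not from any real platform):
 * with stolen at [0x80000000, 0x87ffffff] (128M) and the hardware reporting
 * a 2M reservation at the top, init_reserved_stolen() ends up with
 *
 *	stolen_top   = 0x88000000
 *	dsm.reserved = [0x87e00000, 0x87ffffff]
 *
 * and i915_gem_init_stolen() below then clips the allocatable region to
 * [0x80000000, 0x87dfffff], i.e. usable_size = 126M.
 */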

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
        struct drm_i915_private *i915 = mem->i915;

        mutex_init(&i915->mm.stolen_lock);

        if (intel_vgpu_active(i915)) {
                drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "iGVT-g active");
                return -ENOSPC;
        }

        if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
                drm_notice(&i915->drm,
                           "%s, disabling use of stolen memory\n",
                           "DMAR active");
                return -ENOSPC;
        }

        if (adjust_stolen(i915, &mem->region))
                return -ENOSPC;

        if (request_smem_stolen(i915, &mem->region))
                return -ENOSPC;

        i915->dsm.stolen = mem->region;

        if (init_reserved_stolen(i915))
                return -ENOSPC;

        /* Exclude the reserved region from driver use */
        mem->region.end = i915->dsm.reserved.start - 1;
        mem->io_size = min(mem->io_size, resource_size(&mem->region));

        i915->dsm.usable_size = resource_size(&mem->region);

        drm_dbg(&i915->drm,
                "Memory reserved for graphics device: %lluK, usable: %lluK\n",
                (u64)resource_size(&i915->dsm.stolen) >> 10,
                (u64)i915->dsm.usable_size >> 10);

        if (i915->dsm.usable_size == 0)
                return -ENOSPC;

        /* Basic memrange allocator for stolen space. */
        drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

        /*
         * Access to stolen lmem beyond a certain size for the MTL A0 stepping
         * would crash the machine. Disable stolen lmem for userspace access
         * by setting usable_size to zero.
         */
        if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
                i915->dsm.usable_size = 0;

        return 0;
}

static void dbg_poison(struct i915_ggtt *ggtt,
                       dma_addr_t addr, resource_size_t size,
                       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
        if (!drm_mm_node_allocated(&ggtt->error_capture))
                return;

        if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
                return; /* beware stop_machine() inversion */

        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        mutex_lock(&ggtt->error_mutex);
        while (size) {
                void __iomem *s;

                ggtt->vm.insert_page(&ggtt->vm, addr,
                                     ggtt->error_capture.start,
                                     i915_gem_get_pat_index(ggtt->vm.i915,
                                                            I915_CACHE_NONE),
                                     0);
                mb();

                s = io_mapping_map_wc(&ggtt->iomap,
                                      ggtt->error_capture.start,
                                      PAGE_SIZE);
                memset_io(s, x, PAGE_SIZE);
                io_mapping_unmap(s);

                addr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        mb();
        ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
        mutex_unlock(&ggtt->error_mutex);
#endif
}
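
/*
 * dbg_poison() above reuses the single error-capture page in the GGTT as a
 * CPU window: each stolen page is bound there in turn and filled through a
 * WC mapping, and the slot's PTE is cleared once at the end. POISON_INUSE
 * (0x5a) marks pages handed to an object and POISON_FREE (0x6b) marks pages
 * being released, so stale data in stolen-backed objects shows up as an
 * identifiable pattern in debug builds.
 */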

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             resource_size_t offset, resource_size_t size)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct sg_table *st;
        struct scatterlist *sg;

        GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

        /*
         * We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
        sg_dma_len(sg) = size;

        return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages =
                i915_pages_create_for_stolen(obj->base.dev,
                                             obj->stolen->start,
                                             obj->stolen->size);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        dbg_poison(to_gt(i915)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_INUSE);

        __i915_gem_object_set_pages(obj, pages);

        return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        /* Should only be called from i915_gem_object_release_stolen() */

        dbg_poison(to_gt(i915)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_FREE);

        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

        GEM_BUG_ON(!stolen);
        i915_gem_stolen_remove_node(i915, stolen);
        kfree(stolen);

        i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .name = "i915_gem_object_stolen",
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
                                           struct drm_i915_gem_object *obj,
                                           struct drm_mm_node *stolen)
{
        static struct lock_class_key lock_class;
        unsigned int cache_level;
        unsigned int flags;
        int err;

        /*
         * Stolen objects are always physically contiguous since we just
         * allocate one big block underneath using the drm_mm range allocator.
         */
        flags = I915_BO_ALLOC_CONTIGUOUS;

        drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

        obj->stolen = stolen;
        obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
                return -EBUSY;

        i915_gem_object_init_memory_region(obj, mem);

        err = i915_gem_object_pin_pages(obj);
        if (err)
                i915_gem_object_release_memory_region(obj);
        i915_gem_object_unlock(obj);

        return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
                                        struct drm_i915_gem_object *obj,
                                        resource_size_t offset,
                                        resource_size_t size,
                                        resource_size_t page_size,
                                        unsigned int flags)
{
        struct drm_i915_private *i915 = mem->i915;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&i915->mm.stolen))
                return -ENODEV;

        if (size == 0)
                return -EINVAL;

        /*
         * With discrete devices, where we lack a mappable aperture, there is
         * no way to ever access this memory on the CPU side.
         */
        if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
            !(flags & I915_BO_ALLOC_GPU_ONLY))
                return -ENOSPC;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return -ENOMEM;

        if (offset != I915_BO_INVALID_OFFSET) {
                drm_dbg(&i915->drm,
                        "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
                        &offset, &size);

                stolen->start = offset;
                stolen->size = size;
                mutex_lock(&i915->mm.stolen_lock);
                ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
                mutex_unlock(&i915->mm.stolen_lock);
        } else {
                ret = i915_gem_stolen_insert_node(i915, stolen, size,
                                                  mem->min_page_size);
        }
        if (ret)
                goto err_free;

        ret = __i915_gem_object_create_stolen(mem, obj, stolen);
        if (ret)
                goto err_remove;

        return 0;

err_remove:
        i915_gem_stolen_remove_node(i915, stolen);
err_free:
        kfree(stolen);
        return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
                              resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
        int err;

        /*
         * Initialise stolen early so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        err = i915_gem_init_stolen(mem);
        if (err)
                drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

        return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
        i915_gem_cleanup_stolen(mem->i915);
        return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
        .init = init_stolen_smem,
        .release = release_stolen_smem,
        .init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
        struct drm_i915_private *i915 = mem->i915;
        int err;

        if (GEM_WARN_ON(resource_size(&mem->region) == 0))
                return 0;

        err = i915_gem_init_stolen(mem);
        if (err) {
                drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
                return 0;
        }

        if (mem->io_size &&
            !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size)) {
                err = -EIO;
                goto err_cleanup;
        }

        drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
                &mem->io_start);
        drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

        return 0;

err_cleanup:
        i915_gem_cleanup_stolen(mem->i915);
        return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
        if (mem->io_size)
                io_mapping_fini(&mem->iomap);
        i915_gem_cleanup_stolen(mem->i915);
        return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
        .init = init_stolen_lmem,
        .release = release_stolen_lmem,
        .init_object = _i915_gem_object_stolen_init,
};

static int mtl_get_gms_size(struct intel_uncore *uncore)
{
        u16 ggc, gms;

        ggc = intel_uncore_read16(uncore, GGC);

        /* check GGMS, should be fixed 0x3 (8MB) */
        if ((ggc & GGMS_MASK) != GGMS_MASK)
                return -EIO;

        /* return valid GMS value, -EIO if invalid */
        gms = REG_FIELD_GET(GMS_MASK, ggc);
        switch (gms) {
        case 0x0 ... 0x04:
                return gms * 32;
        case 0xf0 ... 0xfe:
                return (gms - 0xf0 + 1) * 4;
        default:
                MISSING_CASE(gms);
                return -EIO;
        }
}
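
/*
 * mtl_get_gms_size() returns the GMS-encoded stolen size in MB. A worked
 * example with made-up register values: GMS = 0x2 decodes to 2 * 32 = 64MB,
 * while the extended encodings map GMS = 0xf0 to 4MB, 0xf1 to 8MB, and so
 * on in 4MB steps up to 0xfe = 60MB. Anything else is rejected with -EIO.
 */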

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
                           u16 instance)
{
        struct intel_uncore *uncore = &i915->uncore;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        resource_size_t dsm_size, dsm_base, lmem_size;
        struct intel_memory_region *mem;
        resource_size_t io_start, io_size;
        resource_size_t min_page_size;
        int ret;

        if (WARN_ON_ONCE(instance))
                return ERR_PTR(-ENODEV);

        if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
                return ERR_PTR(-ENXIO);

        if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
                lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
        } else {
                resource_size_t lmem_range;

                lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
                lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
                lmem_size *= SZ_1G;
        }

        if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
                /*
                 * The MTL DSM size is in the GGC register. MTL also uses
                 * offsets relative to GSMBASE in its PTEs, so i915 uses
                 * dsm_base = 8MB to set up the stolen region, since
                 * DSMBASE = GSMBASE + 8MB.
                 */
                ret = mtl_get_gms_size(uncore);
                if (ret < 0) {
                        drm_err(&i915->drm, "invalid MTL GGC register setting\n");
                        return ERR_PTR(ret);
                }

                dsm_base = SZ_8M;
                dsm_size = (resource_size_t)(ret * SZ_1M);

                GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
                GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
        } else {
                /* Use DSM base address instead for stolen memory */
                dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
                if (WARN_ON(lmem_size < dsm_base))
                        return ERR_PTR(-ENODEV);
                dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
        }

        if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
                io_start = 0;
                io_size = 0;
        } else {
                io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
                io_size = dsm_size;
        }

        min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
                                              I915_GTT_PAGE_SIZE_4K;

        mem = intel_memory_region_create(i915, dsm_base, dsm_size,
                                         min_page_size,
                                         io_start, io_size,
                                         type, instance,
                                         &i915_region_stolen_lmem_ops);
        if (IS_ERR(mem))
                return mem;

        intel_memory_region_set_name(mem, "stolen-local");

        mem->private = true;

        return mem;
}
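
/*
 * For orientation, an illustrative MTL-style layout under the assumptions
 * coded above (a 256M LMEMBAR, and GGC reporting GMS = 0x2, i.e. 64MB):
 *
 *	lmem_size = 256M (BAR length)
 *	dsm_base  = 8M   (DSMBASE = GSMBASE + 8MB)
 *	dsm_size  = 64M  (from mtl_get_gms_size())
 *	io_start  = pci_resource_start(pdev, GEN12_LMEM_BAR) + 8M
 *	io_size   = 64M
 *
 * The numbers other than the 256M BAR and the 8M offset are made up for
 * the example.
 */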

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
                           u16 instance)
{
        struct intel_memory_region *mem;

        mem = intel_memory_region_create(i915,
                                         intel_graphics_stolen_res.start,
                                         resource_size(&intel_graphics_stolen_res),
                                         PAGE_SIZE, 0, 0, type, instance,
                                         &i915_region_stolen_smem_ops);
        if (IS_ERR(mem))
                return mem;

        intel_memory_region_set_name(mem, "stolen-system");

        mem->private = true;

        return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
        return obj->ops == &i915_gem_object_stolen_ops;
}

bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
        return drm_mm_initialized(&i915->mm.stolen);
}

u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
        return i915->dsm.stolen.start;
}

u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
        return resource_size(&i915->dsm.stolen);
}

u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
                                 const struct drm_mm_node *node)
{
        return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}

bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
        return drm_mm_node_allocated(node);
}

u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
        return node->start;
}

u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
        return node->size;
}