// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
        u16 class;
        u16 instance;
} intel_region_map[] = {
        [INTEL_REGION_SMEM] = {
                .class = INTEL_MEMORY_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_LMEM_0] = {
                .class = INTEL_MEMORY_LOCAL,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_SMEM] = {
                .class = INTEL_MEMORY_STOLEN_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_LMEM] = {
                .class = INTEL_MEMORY_STOLEN_LOCAL,
                .instance = 0,
        },
};

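/*
 * Fill one page of io memory with @value, then read back three probe
 * bytes (the first, a random one, and the last). Any mismatch means the
 * backing store is missing or unreliable.
 */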
static int __iopagetest(struct intel_memory_region *mem,
                        u8 __iomem *va, int pagesize,
                        u8 value, resource_size_t offset,
                        const void *caller)
{
        int byte = get_random_u32_below(pagesize);
        u8 result[3];

        memset_io(va, value, pagesize); /* or GPF! */
        wmb();

        result[0] = ioread8(va);
        result[1] = ioread8(va + byte);
        result[2] = ioread8(va + pagesize - 1);
        if (memchr_inv(result, value, sizeof(result))) {
                dev_err(mem->i915->drm.dev,
                        "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
                        &mem->region, &mem->io_start, &offset, caller,
                        value, result[0], result[1], result[2]);
                return -EINVAL;
        }

        return 0;
}

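/*
 * Map a single page of the region's io space at @offset and exercise it
 * with a handful of test patterns and their complements.
 */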
static int iopagetest(struct intel_memory_region *mem,
                      resource_size_t offset,
                      const void *caller)
{
        const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
        void __iomem *va;
        int err;
        int i;

        va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
        if (!va) {
                dev_err(mem->i915->drm.dev,
                        "Failed to ioremap memory region [%pa + %pa] for %ps\n",
                        &mem->io_start, &offset, caller);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(val); i++) {
                err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
                if (err)
                        break;

                err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
                if (err)
                        break;
        }

        iounmap(va);
        return err;
}

static resource_size_t random_page(resource_size_t last)
{
        /* Limited to low 44b (16TiB), but should suffice for a spot check */
        /* Widen before shifting, or the u32 page index overflows above 4GiB */
        return (resource_size_t)get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

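/*
 * Sanity check the io window: either walk every page (test_all), or probe
 * just the first, the last and one random page within the window.
 */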
static int iomemtest(struct intel_memory_region *mem,
                     bool test_all,
                     const void *caller)
{
        resource_size_t last, page;
        int err;

        if (mem->io_size < PAGE_SIZE)
                return 0;

        last = mem->io_size - PAGE_SIZE;

        /*
         * Quick test to check read/write access to the iomap (backing store).
         *
         * Write a byte, read it back. If the iomapping fails, we expect
         * a GPF preventing further execution. If the backing store does not
         * exist, the read back will return garbage. We check a couple of pages,
         * the first and last of the specified region to confirm the backing
         * store + iomap does cover the entire memory region; and we check
         * a random offset within as a quick spot check for bad memory.
         */

        if (test_all) {
                for (page = 0; page <= last; page += PAGE_SIZE) {
                        err = iopagetest(mem, page, caller);
                        if (err)
                                return err;
                }
        } else {
                err = iopagetest(mem, 0, caller);
                if (err)
                        return err;

                err = iopagetest(mem, last, caller);
                if (err)
                        return err;

                err = iopagetest(mem, random_page(last), caller);
                if (err)
                        return err;
        }

        return 0;
}

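/**
 * intel_memory_region_lookup - Find a region by class and instance
 * @i915: The i915 device.
 * @class: The memory region class, e.g. INTEL_MEMORY_LOCAL.
 * @instance: The instance of that class.
 *
 * Return: The matching region, or NULL if none is registered.
 */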
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
                           u16 class, u16 instance)
{
        struct intel_memory_region *mr;
        int id;

        /* XXX: consider maybe converting to an rb tree at some point */
        for_each_memory_region(mr, i915, id) {
                if (mr->type == class && mr->instance == instance)
                        return mr;
        }

        return NULL;
}

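/**
 * intel_memory_region_by_type - Find the first region of a given type
 * @i915: The i915 device.
 * @mem_type: The memory type to look for.
 *
 * Return: The first matching region, or NULL if none is registered.
 */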
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
                            enum intel_memory_type mem_type)
{
        struct intel_memory_region *mr;
        int id;

        for_each_memory_region(mr, i915, id)
                if (mr->type == mem_type)
                        return mr;

        return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size)
{
        struct ttm_resource_manager *man = mem->region_private;

        GEM_BUG_ON(mem->is_range_manager);

        return i915_ttm_buddy_man_reserve(man, offset, size);
}

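/**
 * intel_memory_region_debug - Dump region state to a drm_printer
 * @mr: The region to describe.
 * @printer: The drm_printer to print to.
 *
 * Prints the state of the backing TTM resource manager if there is one,
 * otherwise just the total region size.
 */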
void intel_memory_region_debug(struct intel_memory_region *mr,
                               struct drm_printer *printer)
{
        drm_printf(printer, "%s: ", mr->name);

        if (mr->region_private)
                ttm_resource_manager_debug(mr->region_private, printer);
        else
                drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

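/*
 * Run the io memtest on regions that have an io window: always on
 * CONFIG_DRM_I915_DEBUG_GEM builds, or when the memtest modparam is set
 * (which additionally forces every page to be checked).
 */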
static int intel_memory_region_memtest(struct intel_memory_region *mem,
                                       void *caller)
{
        struct drm_i915_private *i915 = mem->i915;
        int err = 0;

        if (!mem->io_start)
                return 0;

        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
                err = iomemtest(mem, i915->params.memtest, caller);

        return err;
}

static const char *region_type_str(u16 type)
{
        switch (type) {
        case INTEL_MEMORY_SYSTEM:
                return "system";
        case INTEL_MEMORY_LOCAL:
                return "local";
        case INTEL_MEMORY_STOLEN_LOCAL:
                return "stolen-local";
        case INTEL_MEMORY_STOLEN_SYSTEM:
                return "stolen-system";
        default:
                return "unknown";
        }
}

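/**
 * intel_memory_region_create - Create and initialize a memory region
 * @i915: The i915 device.
 * @start: Base of the region's address range.
 * @size: Size of the region.
 * @min_page_size: Minimum allocation granularity for the region.
 * @io_start: Base of the CPU-accessible io window, if any.
 * @io_size: Size of the io window.
 * @type: The memory class, e.g. INTEL_MEMORY_LOCAL.
 * @instance: Instance number within that class.
 * @ops: Region ops; ops->init(), if set, is called before the memtest.
 *
 * Return: The new region on success, an ERR_PTR() on failure.
 */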
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
                           resource_size_t start,
                           resource_size_t size,
                           resource_size_t min_page_size,
                           resource_size_t io_start,
                           resource_size_t io_size,
                           u16 type,
                           u16 instance,
                           const struct intel_memory_region_ops *ops)
{
        struct intel_memory_region *mem;
        int err;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        mem->i915 = i915;
        mem->region = DEFINE_RES_MEM(start, size);
        mem->io_start = io_start;
        mem->io_size = io_size;
        mem->min_page_size = min_page_size;
        mem->ops = ops;
        mem->total = size;
        mem->type = type;
        mem->instance = instance;

        snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
                 region_type_str(type), instance);

        mutex_init(&mem->objects.lock);
        INIT_LIST_HEAD(&mem->objects.list);

        if (ops->init) {
                err = ops->init(mem);
                if (err)
                        goto err_free;
        }

        err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
        if (err)
                goto err_release;

        return mem;

err_release:
        if (mem->ops->release)
                mem->ops->release(mem);
err_free:
        kfree(mem);
        return ERR_PTR(err);
}

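/**
 * intel_memory_region_set_name - Set the printable name of a region
 * @mem: The region to name.
 * @fmt: printf() style format string.
 */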
void intel_memory_region_set_name(struct intel_memory_region *mem,
                                  const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
        va_end(ap);
}

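/**
 * intel_memory_region_avail - Report available space in a region
 * @mr: The region to query.
 * @avail: Returns the total available space, in bytes.
 * @visible_avail: Returns the CPU-visible available space, in bytes.
 *
 * Local memory is queried via its TTM buddy manager, which reports in
 * pages; for all other region types the whole region is reported as both
 * available and visible.
 */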
void intel_memory_region_avail(struct intel_memory_region *mr,
                               u64 *avail, u64 *visible_avail)
{
        if (mr->type == INTEL_MEMORY_LOCAL) {
                i915_ttm_buddy_man_avail(mr->region_private,
                                         avail, visible_avail);
                *avail <<= PAGE_SHIFT;
                *visible_avail <<= PAGE_SHIFT;
        } else {
                *avail = mr->total;
                *visible_avail = mr->total;
        }
}

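/**
 * intel_memory_region_destroy - Tear down a memory region
 * @mem: The region to destroy.
 *
 * The backing store is released via ops->release(), if set; the region
 * struct itself is only freed when release reports success (zero).
 */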
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
        int ret = 0;

        if (mem->ops->release)
                ret = mem->ops->release(mem);

        GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
        mutex_destroy(&mem->objects.lock);
        if (!ret)
                kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

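/**
 * intel_memory_regions_hw_probe - Set up every region the hardware has
 * @i915: The i915 device.
 *
 * Walks intel_region_map and creates each region the device reports,
 * unwinding everything created so far on the first failure.
 *
 * Return: 0 on success, negative error code on failure.
 */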
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = ERR_PTR(-ENODEV);
                u16 type, instance;

                if (!HAS_REGION(i915, BIT(i)))
                        continue;

                type = intel_region_map[i].class;
                instance = intel_region_map[i].instance;
                switch (type) {
                case INTEL_MEMORY_SYSTEM:
                        if (IS_DGFX(i915))
                                mem = i915_gem_ttm_system_setup(i915, type,
                                                                instance);
                        else
                                mem = i915_gem_shmem_setup(i915, type,
                                                           instance);
                        break;
                case INTEL_MEMORY_STOLEN_LOCAL:
                        mem = i915_gem_stolen_lmem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                case INTEL_MEMORY_STOLEN_SYSTEM:
                        mem = i915_gem_stolen_smem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                default:
                        continue;
                }

                if (IS_ERR(mem)) {
                        err = PTR_ERR(mem);
                        drm_err(&i915->drm,
                                "Failed to setup region(%d) type=%d\n",
                                err, type);
                        goto out_cleanup;
                }

                mem->id = i;
                i915->mm.regions[i] = mem;
        }

        return 0;

out_cleanup:
        intel_memory_regions_driver_release(i915);
        return err;
}

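/**
 * intel_memory_regions_driver_release - Destroy all probed memory regions
 * @i915: The i915 device.
 */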
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *region =
                        fetch_and_zero(&i915->mm.regions[i]);

                if (region)
                        intel_memory_region_destroy(region);
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif