/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
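/*
 * Install a freshly acquired sg_table as the object's backing store: flush it
 * into the GPU's coherency domain if needed, reset the cached sg iterators,
 * derive the GTT page sizes we may use, and (where applicable) place the
 * object on the shrinker lists.
 */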
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;
	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}
	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;
	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}
	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
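/*
 * Lowest-level page acquisition: refuse purgeable objects and then ask the
 * backend (obj->ops->get_pages) to populate obj->mm.pages. The caller must
 * already hold the object lock (shared or exclusive).
 */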
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);
	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}
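/*
 * Convenience wrapper for callers that do not already hold the object lock:
 * take the lock via a ww context, pin the pages, and back off and retry on
 * -EDEADLK as required by the ww locking protocol.
 */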
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}
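/*
 * Throw away every scatterlist position cached in the page and dma-page
 * radix trees; the iterators are rebuilt lazily on the next lookup.
 */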
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}
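/*
 * If any GPU TLB entries were recorded for this object's pages, invalidate
 * them on the GT before the pages are handed back to the system.
 */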
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt = to_gt(i915);

	if (!obj->mm.tlb)
		return;

	intel_gt_invalidate_tlb(gt, obj->mm.tlb);
	obj->mm.tlb = 0;
}
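/*
 * Detach the sg_table from the object: undo the volatile madvise hint, drop
 * the object from the shrinker lists, tear down any kernel mapping and cached
 * sg iterators, and flush stale GPU TLB entries. Returns the detached pages
 * (or the previously stored error pointer) for the backend to release.
 */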
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}
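/*
 * Release the object's pages back to the backend. Fails with -EBUSY while the
 * pages are still pinned; mmap offsets are revoked first so userspace cannot
 * fault the pages back in while they are being released.
 */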
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}
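/*
 * Variant of the above for objects without struct pages (e.g. device local
 * memory behind an iomap): translate each DMA address into a PFN within the
 * region's aperture and vmap those PFNs write-combined.
 */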
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	/*
	 * For discrete our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}
	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}
	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}
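/*
 * As i915_gem_object_pin_map(), but for callers that do not already hold the
 * object lock: take and drop the lock around the pin and map.
 */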
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}
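/*
 * Pick a CPU mapping type that stays coherent with the GPU: lmem objects are
 * always mapped write-combined, LLC platforms (or callers that require
 * coherency) get write-back, and everything else falls back to write-combined.
 */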
enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj,
					  bool always_coherent)
{
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}
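/*
 * Flush CPU writes made through a pinned kernel mapping so they are visible
 * to the GPU; a clflush is only required for cacheable (write-back) mappings
 * of objects that are not already coherent for writes.
 */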
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}
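/*
 * Look up the scatterlist entry covering page index @n, using @iter's radix
 * tree as a cache of previously visited entries; @offset returns the page's
 * position within that entry and @dma selects DMA-mapped vs CPU page counts.
 */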
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);
	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */

	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);
	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);
	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();
	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
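/*
 * Return the struct page backing page index @n of the object; only valid for
 * objects that are backed by struct pages.
 */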
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}
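/*
 * Return the DMA address of page index @n and, optionally via @len, how many
 * bytes remain in the same scatterlist entry from that point.
 */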
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}