/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_engine.h"

#include "i915_vma.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

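/* Bump the object to the MRU end of the bound list under the obj_lock. */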
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long flags;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);

	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);

	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	obj->mm.dirty = true; /* be paranoid */
}

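/*
 * Called when the last request tracked by this vma is retired: drop the
 * object's active count and, once the object is completely idle, prune its
 * shared fences and release the active reference.
 */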
static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (i915_gem_object_is_shrinkable(obj))
		obj_bump_mru(obj);

	i915_gem_object_put(obj); /* and drop the active reference */
}

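/*
 * Allocate and initialise a new vma for @obj in @vm, insert it into the
 * object's vma rbtree and list, and park it on the vm's unbound list.
 * Returns the new vma, the existing vma if another thread raced us to
 * create the same mapping, or an ERR_PTR on failure.
 */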
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

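/*
 * Look up an existing vma for the obj/vm/view combination in the object's
 * vma rbtree. The caller must hold obj->vma.lock. Returns NULL if there is
 * no match.
 */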
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

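/*
 * Map the GGTT vma through the mappable aperture with write-combining,
 * taking an extra pin (and a fence) for the returned mapping; release it
 * again with i915_vma_unpin_iomap().
 */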
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

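/*
 * If this vma has been written through its GGTT mapping, flush those
 * write-combining writes out to memory before the mapping is torn down or
 * reused.
 */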
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

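/*
 * Returns true if the vma's current node does not satisfy the requested
 * size, alignment or PIN_* placement constraints.
 */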
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

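/*
 * Recompute whether this GGTT vma lies entirely within the mappable
 * aperture and is suitably sized and aligned for a fence register, and
 * update I915_VMA_CAN_FENCE accordingly.
 */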
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);

		if (i915_gem_object_is_shrinkable(obj))
			list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);

		obj->bind_count++;
		assert_bind_count(obj);

		spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

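/*
 * Undo i915_vma_insert(): release the drm_mm node, move the vma back to
 * the vm's unbound list and drop the object's bind count and pinned pages.
 */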
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		GEM_BUG_ON(obj->bind_count == 0);
		if (--obj->bind_count == 0 &&
		    i915_gem_object_is_shrinkable(obj) &&
		    obj->mm.madv == I915_MADV_WILLNEED)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

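/*
 * Pin the vma into its address space, inserting it into the drm_mm and
 * binding the requested GLOBAL/USER mappings if it is not already bound.
 */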
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (!i915_vma_is_closed(vma))
		return;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	__i915_vma_remove_closed(vma);
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	__i915_vma_remove_closed(vma);

	WARN_ON(i915_vma_unbind(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	__i915_vma_destroy(vma);
}

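/*
 * With the GT now idle (parked), destroy every vma on the deferred close
 * list, dropping the closed_lock around each destroy.
 */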
void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		list_del_init(&vma->closed_link);
		spin_unlock_irq(&i915->gt.closed_lock);

		i915_vma_destroy(vma);

		spin_lock_irq(&i915->gt.closed_lock);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

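/*
 * Zap the userspace mmaps of this GGTT vma so that the next CPU access
 * faults back into the driver and the binding can be re-validated.
 */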
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
}

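/*
 * Track the vma (and its object) as active on the given request, updating
 * the GEM read/write domains and exporting the request fence to the
 * object's reservation object for implicit synchronisation.
 */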
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	assert_vma_held(vma);
	assert_object_held(obj);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!vma->active.count && !obj->active_count++)
		i915_gem_object_get(obj); /* once more for the active ref */

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count && !--obj->active_count)
			i915_gem_object_put(obj);
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}

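/*
 * Wait for the vma to become idle, flush and revoke any GGTT mappings,
 * then release its PTEs and drm_mm node. Fails with -EBUSY if the vma is
 * still pinned.
 */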
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}