/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>
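
/*
 * i915_vma_retire() is the i915_gem_active callback that runs when the last
 * request tracked in vma->last_read[] for an engine is retired. Once no
 * engine still marks the vma active, the vma moves to the inactive list and,
 * if it has already been closed, is unbound; the object's own active
 * reference is dropped when its last active vma retires.
 */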
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;
	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;
	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

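/*
 * Allocate and initialise a new vma for the (obj, vm, view) triplet: the vma
 * size is derived from the GGTT view (partial and rotated views can differ
 * from obj->base.size), and the vma is linked into the object's vma list and
 * lookup tree as well as the address space's unbound list.
 */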
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->params.partial.offset,
						     view->params.partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

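	/*
	 * Each object keeps its vmas in an rb-tree ordered by
	 * i915_vma_compare() (i.e. by address space and GGTT view), so that a
	 * later lookup of the (vm, view) pair does not have to walk the whole
	 * vma list.
	 */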
	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

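/*
 * Map the GGTT-bound vma into the CPU's address space via the mappable
 * aperture using a write-combining io mapping. The mapping is cached in
 * vma->iomap and an extra pin is taken on the vma; the caller releases both
 * with i915_vma_unpin_iomap().
 */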
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

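/*
 * Convenience helper for owners that embed a pinned vma pointer: clear the
 * pointer, drop the pin, close the vma and finally release the object
 * reference it carried (the free is deferred while the object is active).
 */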
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

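/*
 * Report whether the vma's current placement violates the constraints of a
 * new pin request (size, alignment, mappability, offset bias or fixed
 * offset); callers use this to decide whether the vma must be unbound and
 * rebound elsewhere.
 */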
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

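	/*
	 * The vma can back a fence register only if its node matches the
	 * fence size and alignment exactly, and it is only CPU-mappable
	 * through the aperture if it lies entirely below the mappable
	 * boundary.
	 */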
	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !other->hole_follows)
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !node->hole_follows)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

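	/*
	 * With PIN_OFFSET_FIXED the caller demands an exact GTT offset: try
	 * to reserve that node directly and, if the range is already
	 * occupied, evict the overlapping vmas and retry once.
	 */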
	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma, flags);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

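/*
 * Slow path of i915_vma_pin(): the caller has already taken the pin
 * reference in vma->flags, so every error path here must drop it again via
 * __i915_vma_unpin(). A first-time pin also inserts the vma into its address
 * space before binding.
 */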
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

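/* Final teardown of an idle, closed and unbound vma; returns it to the slab. */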
void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

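/*
 * i915_vma_unbind - wait for the vma to go idle, release any fence and CPU
 * iomapping, zap its PTEs and return its node to the address space. Fails
 * with -EBUSY while the vma is still pinned.
 */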
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

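	/*
	 * Partial and rotated GGTT views are backed by a scatterlist built
	 * for the view itself rather than by the object's own page list, so
	 * that private copy must be freed here.
	 */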
	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}