/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

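/*
 * i915_vma_retire() runs when the last request tracked in one of the vma's
 * per-engine last_read slots is retired. Once no engine is reading the vma
 * any more, it is moved to its address space's inactive list; once the
 * parent object has no active vma left at all, the object's place on the
 * bound list is refreshed and any deferred active reference is released.
 */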
static void
i915_vma_retire(struct i915_gem_active *active,
                struct drm_i915_gem_request *rq)
{
        const unsigned int idx = rq->engine->id;
        struct i915_vma *vma =
                container_of(active, struct i915_vma, last_read[idx]);
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

        i915_vma_clear_active(vma, idx);
        if (i915_vma_is_active(vma))
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
                WARN_ON(i915_vma_unbind(vma));

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to, of course!)
         */
        if (obj->bind_count)
                list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;
        int i;

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&vma->exec_list);
        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->obj = obj;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size >= obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}

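/*
 * Per-object vmas live in an rb-tree (obj->vma_tree) ordered by
 * i915_vma_compare() over the target address space and GGTT view, so this
 * lookup descends the tree in the same order that vma_create() above used
 * when inserting.
 */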
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}
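
/*
 * Example (a minimal sketch, not taken from this file; assumes dev_priv and
 * obj are in scope): a caller wanting the default GGTT mapping of an object
 * would, under struct_mutex, do roughly:
 *
 *	struct i915_vma *vma;
 *	int ret;
 *
 *	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *
 * i915_vma_instance() never returns NULL; failure is an ERR_PTR() value.
 */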

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: PIN_* flags selecting a global and/or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        if (WARN_ON(flags == 0))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (vma_flags == 0 && vma->vm->allocate_va_range) {
                trace_i915_va_alloc(vma);
                ret = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start,
                                                 vma->node.size);
                if (ret)
                        return ret;
        }

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}
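
/*
 * Example (sketch): PIN_GLOBAL selects a GGTT binding and PIN_USER a ppGTT
 * binding; the two may be combined. Since bind flags already present in
 * vma->flags are masked out (unless PIN_UPDATE is given), calling
 *
 *	ret = i915_vma_bind(vma, vma->obj->cache_level,
 *			    PIN_USER | PIN_GLOBAL);
 *
 * on a vma that already carries both bindings returns 0 without touching
 * any PTEs.
 */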

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
                return IO_ERR_PTR(-ENODEV);

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL)
                        return IO_ERR_PTR(-ENOMEM);

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);
        return ptr;
}
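
/*
 * Example (sketch, assuming the vma is already pinned map-and-fenceable in
 * the GGTT, a runtime-pm wakeref is held, and value/offset are the caller's
 * own data):
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * The WC mapping itself is cached in vma->iomap and stays alive until the
 * vma is unbound; the pin/unpin pair only brackets its use.
 */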

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        __i915_gem_object_release_unless_active(obj);
}

bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
        int ret;

        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
                          size, obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -E2BIG;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_unpin;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, obj->cache_level,
                                           flags);
                if (ret)
                        goto err_unpin;
        } else {
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_unpin;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        obj->bind_count++;
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}
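
/*
 * Example (sketch): PIN_OFFSET_BIAS and PIN_OFFSET_FIXED both encode their
 * offset in the upper bits of the flags word. A caller that must place the
 * vma at an exact, page-aligned GGTT address (softpin) would request:
 *
 *	ret = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_OFFSET_FIXED |
 *			   (offset & PIN_OFFSET_MASK));
 *
 * where offset is the caller's required address; a misaligned or
 * out-of-range offset is rejected here with -EINVAL.
 */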

static void
i915_vma_remove(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        if (--obj->bind_count == 0)
                list_move_tail(&obj->global_link,
                               &to_i915(obj->base.dev)->mm.unbound_list);

        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

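/*
 * __i915_vma_do_pin() is the slow path behind the i915_vma_pin() inline:
 * the fast path has already raised the pin count before calling in, which
 * keeps the shrinker and eviction logic from destroying the vma while we
 * insert and bind it, so every error path here must drop that pin again.
 */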
int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        const unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err_unpin;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err_unpin;
        }

        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
                goto err_remove;

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                GEM_BUG_ON(vma->pages);
                i915_vma_remove(vma);
        }
err_unpin:
        __i915_vma_unpin(vma);
        return ret;
}

void i915_vma_destroy(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(!i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->fence);

        list_del(&vma->vm_link);
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        list_del(&vma->obj_link);
        rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
                WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

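/*
 * i915_vma_unbind() evicts the vma from its address space: it first waits
 * for any outstanding GPU reads, then, for map-and-fenceable GGTT vmas,
 * releases the fence register, CPU mmaps and the cached iomap, clears the
 * PTEs and finally returns the drm_mm node. A pinned vma cannot be unbound
 * and yields -EBUSY.
 */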
int i915_vma_unbind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        unsigned long active;
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        active = i915_vma_get_active(vma);
        if (active) {
                int idx;

                /* When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                for_each_active(active, idx) {
                        ret = i915_gem_active_retire(&vma->last_read[idx],
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                break;
                }

                __i915_vma_unpin(vma);
                if (ret)
                        return ret;

                GEM_BUG_ON(i915_vma_is_active(vma));
        }

        if (i915_vma_is_pinned(vma))
                return -EBUSY;

        if (!drm_mm_node_allocated(&vma->node))
                goto destroy;

        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_gem_release_mmap(obj);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->vm->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
                kfree(vma->pages);
        }
        vma->pages = NULL;

        i915_vma_remove(vma);

destroy:
        if (unlikely(i915_vma_is_closed(vma)))
                i915_vma_destroy(vma);

        return 0;
}