/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "intel_frontbuffer.h"

static struct i915_global_object {
        struct i915_global base;
        struct kmem_cache *slab_objects;
} global;

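/*
 * GEM objects are allocated from a dedicated slab cache, set up in
 * i915_global_objects_init() below, rather than from the general-purpose
 * allocators.
 */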
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
        return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        kmem_cache_free(global.slab_objects, obj);
}

/* Bookkeeping: track the number and total size of allocated objects */
static void i915_gem_info_add_obj(struct drm_i915_private *i915,
                                  u64 size)
{
        spin_lock(&i915->mm.object_stat_lock);
        i915->mm.object_count++;
        i915->mm.object_memory += size;
        spin_unlock(&i915->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *i915,
                                     u64 size)
{
        spin_lock(&i915->mm.object_stat_lock);
        i915->mm.object_count--;
        i915->mm.object_memory -= size;
        spin_unlock(&i915->mm.object_stat_lock);
}

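/*
 * Retirement callback for obj->frontbuffer_write: once the last GPU write
 * to a frontbuffer object completes, flush the object so the display picks
 * up the rendered contents.
 */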
static void
frontbuffer_retire(struct i915_active_request *active,
                   struct i915_request *request)
{
        struct drm_i915_gem_object *obj =
                container_of(active, typeof(*obj), frontbuffer_write);

        intel_fb_obj_flush(obj, ORIGIN_CS);
}

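/*
 * Initialise the common, embedded parts of a newly allocated GEM object and
 * account it against the device-wide object statistics.
 */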
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
{
        mutex_init(&obj->mm.lock);

        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);

        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);

        init_rcu_head(&obj->rcu);

        obj->ops = ops;

        reservation_object_init(&obj->__builtin_resv);
        obj->resv = &obj->__builtin_resv;

        obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
        i915_active_request_init(&obj->frontbuffer_write,
                                 NULL, frontbuffer_retire);

        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);

        i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level)
{
        obj->cache_level = cache_level;

        if (cache_level != I915_CACHE_NONE)
                obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
                                       I915_BO_CACHE_COHERENT_FOR_WRITE);
        else if (HAS_LLC(to_i915(obj->base.dev)))
                obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
        else
                obj->cache_coherent = 0;

        obj->cache_dirty =
                !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

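/*
 * Called when a GEM handle is closed: drop every per-file lookup entry (LUT)
 * that this file held for the object, and close the associated VMA once its
 * open-count reaches zero.
 */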
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(gem->dev);
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
        struct i915_lut_handle *lut, *ln;

        mutex_lock(&i915->drm.struct_mutex);

        list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;
                struct i915_vma *vma;

                GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
                if (ctx->file_priv != fpriv)
                        continue;

                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
                GEM_BUG_ON(vma->obj != obj);

                /* We allow the process to have multiple handles to the same
                 * vma, in the same fd namespace, by virtue of flink/open.
                 */
                GEM_BUG_ON(!vma->open_count);
                if (!--vma->open_count && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&lut->obj_link);
                list_del(&lut->ctx_link);

                i915_lut_handle_free(lut);
                __i915_gem_object_release_unless_active(obj);
        }

        mutex_unlock(&i915->drm.struct_mutex);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
        /* If we are the last user of the backing storage (be it shmemfs
         * pages or stolen etc), we know that the pages are going to be
         * immediately released. In this case, we can then skip copying
         * back the contents from the GPU.
         */

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return false;

        if (!obj->base.filp)
                return true;

        /* At first glance, this looks racy, but then again so would be
         * userspace racing mmap against close. However, the first external
         * reference to the filp can only be obtained through the
         * i915_gem_mmap_ioctl() which safeguards us against the user
         * acquiring such a reference whilst we are in the middle of
         * freeing the object.
         */
        return file_count(obj->base.filp) == 1;
}

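/*
 * Perform the actual teardown for a batch of deferred-freed objects: destroy
 * their VMA, drop their pages and release them back to the slab cache. Runs
 * under a runtime-pm wakeref since unbinding may touch the hardware.
 */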
static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
{
        struct drm_i915_gem_object *obj, *on;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(i915);
        llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_vma *vma, *vn;

                trace_i915_gem_object_destroy(obj);

                mutex_lock(&i915->drm.struct_mutex);

                GEM_BUG_ON(i915_gem_object_is_active(obj));
                list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
                        GEM_BUG_ON(i915_vma_is_active(vma));
                        vma->flags &= ~I915_VMA_PIN_MASK;
                        i915_vma_destroy(vma);
                }
                GEM_BUG_ON(!list_empty(&obj->vma.list));
                GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));

                /* This serializes freeing with the shrinker. Since the free
                 * is delayed, first by RCU then by the workqueue, we want the
                 * shrinker to be able to free pages of unreferenced objects,
                 * or else we may oom whilst there are plenty of deferred
                 * freed objects.
                 */
                if (i915_gem_object_has_pages(obj)) {
                        spin_lock(&i915->mm.obj_lock);
                        list_del_init(&obj->mm.link);
                        spin_unlock(&i915->mm.obj_lock);
                }

                mutex_unlock(&i915->drm.struct_mutex);

                GEM_BUG_ON(obj->bind_count);
                GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));

                if (obj->ops->release)
                        obj->ops->release(obj);

                if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                        atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                GEM_BUG_ON(i915_gem_object_has_pages(obj));

                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);

                reservation_object_fini(&obj->__builtin_resv);
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(i915, obj->base.size);

                bitmap_free(obj->bit_17);
                i915_gem_object_free(obj);

                GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
                atomic_dec(&i915->mm.free_count);

                if (on)
                        cond_resched();
        }
        intel_runtime_pm_put(i915, wakeref);
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
        struct llist_node *freed;

        /* Free the oldest, most stale object to keep the free_list short */
        freed = NULL;
        if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
                /* Only one consumer of llist_del_first() allowed */
                spin_lock(&i915->mm.free_lock);
                freed = llist_del_first(&i915->mm.free_list);
                spin_unlock(&i915->mm.free_lock);
        }
        if (unlikely(freed)) {
                freed->next = NULL;
                __i915_gem_free_objects(i915, freed);
        }
}

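/*
 * Worker that drains mm.free_list. Object teardown needs to sleep (it takes
 * struct_mutex), so it cannot run from the RCU callback itself and is
 * batched up here instead.
 */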
static void __i915_gem_free_work(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);
        struct llist_node *freed;

        /*
         * All file-owned VMA should have been released by this point through
         * i915_gem_close_object(), or earlier by i915_gem_context_close().
         * However, the object may also be bound into the global GTT (e.g.
         * older GPUs without per-process support, or for direct access through
         * the GTT either for the user or for scanout). Those VMA still need to
         * be unbound now.
         */

        spin_lock(&i915->mm.free_lock);
        while ((freed = llist_del_all(&i915->mm.free_list))) {
                spin_unlock(&i915->mm.free_lock);

                __i915_gem_free_objects(i915, freed);
                if (need_resched())
                        return;

                spin_lock(&i915->mm.free_lock);
        }
        spin_unlock(&i915->mm.free_lock);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
        struct drm_i915_gem_object *obj =
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        /*
         * We reuse obj->rcu for the freed list, so we had better not treat
         * it like an rcu_head from this point forwards. And we expect all
         * objects to be freed via this path.
         */
        destroy_rcu_head(&obj->rcu);

        /*
         * Since we require blocking on struct_mutex to unbind the freed
         * object from the GPU before releasing resources back to the
         * system, we cannot do that directly from the RCU callback (which may
         * be a softirq context), but must instead defer that work onto a
         * kthread. We use the RCU callback rather than move the freed object
         * directly onto the work queue so that we can mix between using the
         * worker and performing frees directly from subsequent allocations for
         * crude but effective memory throttling.
         */
        if (llist_add(&obj->freed, &i915->mm.free_list))
                queue_work(i915->wq, &i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

        if (obj->mm.quirked)
                __i915_gem_object_unpin_pages(obj);

        if (discard_backing_storage(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
         * i915_gem_busy_ioctl(). For the corresponding synchronized
         * lookup see i915_gem_object_lookup_rcu().
         */
        atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
        call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

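/*
 * Drop a reference to the object, but if it is still active on the GPU,
 * defer the final put until the object idles by marking it as carrying an
 * active reference.
 */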
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&obj->base.dev->struct_mutex);

        if (!i915_gem_object_has_active_reference(obj) &&
            i915_gem_object_is_active(obj))
                i915_gem_object_set_active_reference(obj);
        else
                i915_gem_object_put(obj);
}

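/* Per-device initialisation: hook up the deferred-free worker. */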
void i915_gem_init__objects(struct drm_i915_private *i915)
{
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
        kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
        kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
        .shrink = i915_global_objects_shrink,
        .exit = i915_global_objects_exit,
} };

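/*
 * Module init: create the object slab cache and register the shrink/exit
 * hooks with the i915 globals.
 */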
int __init i915_global_objects_init(void)
{
        global.slab_objects =
                KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
        if (!global.slab_objects)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}