/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "intel_frontbuffer.h"

static struct i915_global_object {
	struct i915_global base;
	struct kmem_cache *slab_objects;
} global;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(global.slab_objects, obj);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *i915,
				  u64 size)
{
	spin_lock(&i915->mm.object_stat_lock);
	i915->mm.object_count++;
	i915->mm.object_memory += size;
	spin_unlock(&i915->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *i915,
				     u64 size)
{
	spin_lock(&i915->mm.object_stat_lock);
	i915->mm.object_count--;
	i915->mm.object_memory -= size;
	spin_unlock(&i915->mm.object_stat_lock);
}

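/*
 * Retirement callback for the last GPU (CS) write tracked against the
 * object's frontbuffer: once that request completes, flush the frontbuffer
 * so the display-side tracking sees the finished write.
 */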
static void
frontbuffer_retire(struct i915_active_request *active,
		   struct i915_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, ORIGIN_CS);
}

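/*
 * Initialise a freshly allocated GEM object: set up its locks, lists,
 * reservation object and default cache/madvise state, and install the
 * backend ops vtable supplied by the caller.
 */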
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->lut_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	init_rcu_head(&obj->rcu);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	i915_active_request_init(&obj->frontbuffer_write,
				 NULL, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

/**
 * Mark up the object's coherency levels for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

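/*
 * Called when a GEM handle to this object is closed: walk the object's
 * lookup-table entries, drop the per-context handle/vma references that
 * belong to the closing file, and release the object reference taken for
 * each of them.
 */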
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(gem->dev);
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle *lut, *ln;

	mutex_lock(&i915->drm.struct_mutex);

	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
		if (ctx->file_priv != fpriv)
			continue;

		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		GEM_BUG_ON(vma->obj != obj);

		/* We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */
		GEM_BUG_ON(!vma->open_count);
		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&lut->obj_link);
		list_del(&lut->ctx_link);

		i915_lut_handle_free(lut);
		__i915_gem_object_release_unless_active(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
}

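/*
 * Report whether the backing storage can simply be discarded on free,
 * i.e. whether we are the last user of the backing file and so there is
 * no point preserving its contents.
 */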
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */
	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (!obj->base.filp)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return file_count(obj->base.filp) == 1;
}

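/*
 * Perform the actual teardown of objects on the deferred free list:
 * destroy any remaining VMA, drop the pages and backing storage, and
 * finally return the object to the slab. A runtime-pm wakeref is held
 * across the loop as unbinding may need the device awake.
 */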
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	llist_for_each_entry_safe(obj, on, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		mutex_lock(&i915->drm.struct_mutex);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_destroy(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma.list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));

		/* This serializes freeing with the shrinker. Since the free
		 * is delayed, first by RCU then by the workqueue, we want the
		 * shrinker to be able to free pages of unreferenced objects,
		 * or else we may oom whilst there are plenty of deferred
		 * objects on the shrink lists.
		 */
		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_del_init(&obj->mm.link);
			spin_unlock(&i915->mm.obj_lock);
		}

		mutex_unlock(&i915->drm.struct_mutex);

		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
		GEM_BUG_ON(!list_empty(&obj->lut_list));

		if (obj->ops->release)
			obj->ops->release(obj);

		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		bitmap_free(obj->bit_17);
		i915_gem_object_free(obj);

		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
		atomic_dec(&i915->mm.free_count);

		if (on)
			cond_resched();
	}
	intel_runtime_pm_put(i915, wakeref);
}

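/*
 * Opportunistically free a single object from the deferred free list,
 * typically called from allocation paths so that deferred frees act as a
 * crude throttle on memory held by objects awaiting the free worker.
 */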
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	/* Free the oldest, most stale object to keep the free_list short */
	freed = NULL;
	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
		/* Only one consumer of llist_del_first() allowed */
		spin_lock(&i915->mm.free_lock);
		freed = llist_del_first(&i915->mm.free_list);
		spin_unlock(&i915->mm.free_lock);
	}
	if (unlikely(freed)) {
		freed->next = NULL;
		__i915_gem_free_objects(i915, freed);
	}
}

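/*
 * Worker that drains the deferred free list, performing the actual object
 * teardown outside of the RCU callback (softirq) context.
 */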
static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;

	/*
	 * All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be flushed or we may be leaking resources.
	 */
	spin_lock(&i915->mm.free_lock);
	while ((freed = llist_del_all(&i915->mm.free_list))) {
		spin_unlock(&i915->mm.free_lock);

		__i915_gem_free_objects(i915, freed);
		if (need_resched())
			return;

		spin_lock(&i915->mm.free_lock);
	}
	spin_unlock(&i915->mm.free_lock);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * We reuse obj->rcu for the freed list, so we had better not treat
	 * it like a rcu_head from this point forwards. And we expect all
	 * objects to be freed via this path.
	 */
	destroy_rcu_head(&obj->rcu);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

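/*
 * Final unreference of a GEM object: rather than tearing it down
 * immediately, bump the pending-free count and defer the release through
 * an RCU grace period and the deferred free machinery above.
 */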
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

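/*
 * Drop the caller's reference, but if the object is still active on the
 * GPU convert it into an active reference that is released only when the
 * object finally idles.
 */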
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

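/* Per-device setup: initialise the deferred-free worker. */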
void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
	.shrink = i915_global_objects_shrink,
	.exit = i915_global_objects_exit,
} };

int __init i915_global_objects_init(void)
{
	global.slab_objects =
		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!global.slab_objects)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}