/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;

        return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        obj = i915_gem_object_get_rcu(obj);
        rcu_read_unlock();

        return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}
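
/*
 * Example (an illustrative sketch, not part of the original header): the
 * typical lookup-use-release pattern from an ioctl handler. The args->handle
 * name is assumed for illustration only.
 *
 *      struct drm_i915_gem_object *obj;
 *
 *      obj = i915_gem_object_lookup(file, args->handle);
 *      if (!obj)
 *              return -ENOENT;
 *
 *      ... operate on obj; the acquired reference keeps it alive ...
 *
 *      i915_gem_object_put(obj);
 */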

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}
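
/*
 * Example (an illustrative sketch): taking the object's dma_resv lock around
 * a section that requires it, using the interruptible variant so a pending
 * signal can abort the wait.
 *
 *      int err;
 *
 *      err = i915_gem_object_lock_interruptible(obj);
 *      if (err)
 *              return err;
 *
 *      assert_object_held(obj);
 *      ... touch state protected by obj->base.resv ...
 *
 *      i915_gem_object_unlock(obj);
 */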

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
{
        return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}
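
/*
 * Worked example (illustrative): a Y-tiled object has a tile height of
 * 32 rows, so with a 512 byte stride i915_gem_object_get_tile_row_size()
 * returns 512 * 32 = 16384 bytes per row of tiles; an X-tiled object with
 * the same stride (tile height 8) would return 512 * 8 = 4096 bytes.
 */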

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        /*
         * Only used by struct_mutex, when called "recursively" from
         * direct-reclaim-esque paths. Safe because there is only ever one
         * struct_mutex in the entire system.
         */
        I915_MM_SHRINKER = 1,
        /*
         * Used for obj->mm.lock when allocating pages. Safe because the object
         * isn't yet on any LRU, and therefore the shrinker can't deadlock on
         * it. As soon as the object has pages, obj->mm.lock nests within
         * the shrinker.
         */
        I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}
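
/*
 * Example (an illustrative sketch): keeping an object's backing pages
 * resident while accessing them, balanced by an unpin when done.
 *
 *      int err;
 *
 *      err = i915_gem_object_pin_pages(obj);
 *      if (err)
 *              return err;
 *
 *      ... obj->mm.pages is now populated and cannot be reaped ...
 *
 *      i915_gem_object_unpin_pages(obj);
 */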

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);

static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * freed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
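
/*
 * Example (an illustrative sketch): filling an object through a CPU mapping.
 * The src and len names are assumed for illustration.
 *
 *      void *vaddr;
 *
 *      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *
 *      memcpy(vaddr, src, len);
 *      i915_gem_object_flush_map(obj); ... flush CPU writes before GPU use ...
 *      i915_gem_object_unpin_map(obj);
 */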

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        i915_gem_object_unlock(obj);
}
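
/*
 * Example (an illustrative sketch): CPU write access with explicit cache
 * management, in the style of the pwrite paths. prepare_write locks and pins
 * the object; finish_access undoes both.
 *
 *      unsigned int needs_clflush;
 *      int err;
 *
 *      err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *      if (err)
 *              return err;
 *
 *      ... write the pages, honouring CLFLUSH_BEFORE / CLFLUSH_AFTER
 *          as reported in needs_clflush ...
 *
 *      i915_gem_object_finish_access(obj);
 */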

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                  enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                       enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif /* __I915_GEM_OBJECT_H__ */