/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"

static bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver-private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;

	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

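/*
 * Illustrative userspace sketch (not part of this file; hypothetical fd and
 * handle names, no error handling): the legacy path above is driven by
 * DRM_IOCTL_I915_GEM_MMAP on the DRM fd.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,		// GEM handle
 *		.size = size,			// bytes to map
 *		.flags = I915_MMAP_WC,		// optional write-combined map
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * New userspace should prefer the mmap-offset path documented above.
 */
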
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     access.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

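/*
 * Usage note (illustrative, not part of this file): userspace can query the
 * version reported above with the GETPARAM ioctl and
 * I915_PARAM_MMAP_GTT_VERSION, e.g. (hypothetical fd name, no error
 * handling):
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */
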
static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_gtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GTT_VIEW_NORMAL;

	return view;
}

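/*
 * Worked example (hypothetical numbers): for an untiled 16 MiB object
 * (4096 pages) faulting at page 3000 with a 256-page (1 MiB) chunk,
 * view.partial.offset = rounddown(3000, 256) = 2816 and
 * view.partial.size = min(256, 4096 - 2816) = 256, i.e. a 1 MiB window
 * around the faulting page instead of binding the whole object in the GGTT.
 */
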
static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

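/*
 * Note on the handler above: the first CPU fault populates PTEs for the
 * whole VMA range in a single remap_io_sg() call, so subsequent accesses
 * hit the installed PTEs directly until they are revoked again in
 * obj->ops->put_pages().
 */
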
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

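/*
 * The fault handler above uses the usual GEM ww-mutex pattern: on -EDEADLK
 * the ww context backs off (dropping the object lock) and the sequence
 * restarts from the retry: label, so every step between retry: and the
 * unwind labels is written to be safe to execute more than once.
 */
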
static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

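/*
 * vm_access() backs access_process_vm()-style remote reads and writes (for
 * example a debugger poking a mapped buffer via ptrace), which is why it
 * favours one simple forced-WC CPU mapping over the faster fault paths.
 */
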
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);

	/*
	 * We have exclusive access here via runtime suspend. All other callers
	 * must first grab the rpm wakeref.
	 */
	GEM_BUG_ON(!obj->userfault_count);
	list_del(&obj->userfault_link);
	obj->userfault_count = 0;
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

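/*
 * lookup_mmo()/insert_mmo() above maintain a small per-object rbtree in
 * obj->mmo.offsets, keyed and ordered by mmap_type, so each object exposes
 * at most one fake offset per mapping type; insert_mmo() resolves races by
 * keeping the node that is already in the tree and freeing the newcomer.
 */
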
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (!err)
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow_once(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

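/*
 * Illustrative userspace sketch for the mmap-offset path (hypothetical
 * names, no error handling):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, arg.offset);
 *
 * The resulting mmap() lands in i915_gem_mmap() below, which resolves the
 * fake offset back to the object and its mapping type.
 */
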
static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

static int
i915_gem_object_mmap(struct drm_i915_gem_object *obj,
		     struct i915_mmap_offset *mmo,
		     struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct file *anon;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = obj->base.vma_node.driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able to
 * resolve multiple mmap offsets which could be tied to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	return i915_gem_object_mmap(obj, mmo, vma);
}

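/*
 * i915_gem_mmap() above is installed as the driver's file_operations.mmap
 * hook, so every mmap() on the DRM fd funnels through here; vma->vm_pgoff
 * carries the fake offset handed out by the mmap-offset ioctl above.
 */
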
int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct i915_mmap_offset *mmo = NULL;
	enum i915_mmap_type mmap_type;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* handle ttm object */
	if (obj->ops->mmap_ops) {
		/*
		 * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
		 * to calculate page offset so set that up.
		 */
		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
	} else {
		/* handle stolen and smem objects */
		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);
	}

	/*
	 * When we install vm_ops for mmap we are too late for
	 * the vm_ops->open() which increases the ref_count of
	 * this obj and then it gets decreased by the vm_ops->close().
	 * To balance this increase the obj ref_count here.
	 */
	obj = i915_gem_object_get(obj);
	return i915_gem_object_mmap(obj, mmo, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif