// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

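/*
 * Lazily attach backing pages to the object (shmem pages when an IOMMU is
 * in use, otherwise pages from the VRAM carveout) and build the sg_table
 * used for device mappings.  Must be called with msm_obj->lock held.
 */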
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

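/*
 * Set up a userspace mapping of the object: WC and uncached buffers get
 * write-combine/noncached page protections and are populated on demand by
 * the fault handler, while cached buffers are shunted to the shmem file's
 * own address_space.
 */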
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);

		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

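/*
 * Fault handler for userspace mappings: attaches backing pages if they are
 * not already present and inserts the faulting page's pfn into the vma as
 * a mixed mapping.
 */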
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);

	return ret;
}

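/*
 * A typical caller pairs this with msm_gem_unpin_iova() once the GPU is
 * done with the buffer, roughly:
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (!ret) {
 *		... emit cmdstream referencing iova ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 *
 * (Illustrative only; the actual submit paths manage pinning per-submit.)
 */
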
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

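/*
 * Map the object into the kernel's address space (write-combined) and bump
 * vmap_count so the shrinker won't vunmap it out from under the caller.
 * Fails with -EBUSY if the object's madvise state is weaker than what the
 * caller tolerates.
 */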
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * ring.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

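/*
 * Release everything purgeable about the object: kernel vmap, iovas,
 * backing pages, and the shmem backing store itself.  Called from the
 * shrinker path under dev->struct_mutex when memory is tight.
 */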
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

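/*
 * Wait for any fences from other contexts already attached to the object's
 * reservation, so that cmdstream submission is ordered correctly against
 * prior users of the buffer.
 */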
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

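/*
 * Deferred free path: msm_gem_free_object() above only queues the object on
 * priv->free_list; the actual teardown below runs from the free_work
 * workqueue with dev->struct_mutex held.
 */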
static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

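/*
 * Common allocation path: validates the cache flags, allocates and
 * initializes the msm_gem_object, and puts it on the inactive list.  The
 * caller is responsible for initializing the underlying GEM object
 * (shmem-backed or private/VRAM-backed).
 */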
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

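/*
 * Import a dmabuf: wrap the caller-provided sg_table in a GEM object
 * without allocating new backing pages.  Only supported when an IOMMU is
 * present (see the check below).
 */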
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

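/*
 * msm_gem_kernel_new()/msm_gem_kernel_put() are convenience wrappers for
 * kernel-internal buffers (ringbuffers, microcode, etc.), used roughly as:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (!IS_ERR(ptr)) {
 *		... use ptr / iova ...
 *		msm_gem_kernel_put(bo, aspace, false);
 *	}
 *
 * (Illustrative only.)
 */
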
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}