// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

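/*
 * Added note: objects allocated from the VRAM carveout keep a drm_mm node in
 * msm_obj->vram_node, so use_pages() returning false is what routes an object
 * through get_pages_vram()/put_pages_vram() below instead of the shmem-backed
 * drm_gem_get_pages() path.
 */
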
/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

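/*
 * Added note: these two helpers use dma_map_sgtable()/dma_unmap_sgtable()
 * purely for the cache maintenance described in the comment above; the
 * resulting DMA mapping itself is not what the driver relies on, and it is
 * torn down again from put_pages() for write-combined buffers.
 */
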
static void update_lru_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_obj->pages);

	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else {
		update_lru_active(obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

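/*
 * Added summary: the helpers above keep each object on exactly one of the
 * drm_gem_lru lists in msm_drm_private (lru.unbacked, lru.pinned,
 * lru.willneed, lru.dontneed), keyed off whether backing pages exist, the
 * pin_count, and the object's madvise state.  All list movement happens
 * under priv->lru.lock.
 */
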
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
					      unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			      msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (IS_ERR(p))
		return p;

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count++;
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}

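/*
 * Usage sketch (illustrative, not part of this file): userspace mmap()s a
 * GEM object by passing the fake offset returned above, e.g. for a dumb
 * buffer.  Variable names are placeholders.
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, req.offset);
 */
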
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);

	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock.  In particular we know that we already have backing
 * and that the object's dma_resv has the fence for the current
 * submit/job which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
	mutex_unlock(&priv->lru.lock);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

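/*
 * Usage sketch (illustrative, not part of this file): a typical in-kernel
 * caller pins a GPU address for the duration of a job and drops the pin
 * afterwards.  "my_aspace" is a placeholder for whatever address space the
 * caller owns.
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, my_aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... emit cmdstream referencing iova ...
 *	msm_gem_unpin_iova(obj, my_aspace);
 */
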
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	pages = msm_gem_pin_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

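/*
 * Usage sketch (illustrative, not part of this file): CPU access through the
 * kernel mapping is bracketed by get/put so the vmap and pin counts stay
 * balanced for the shrinker.  "data" and "len" are placeholders.
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */
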
/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

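/*
 * Usage sketch (illustrative, not part of this file): the madvise ioctl path
 * uses the return value to tell userspace whether a DONTNEED -> WILLNEED
 * transition found the backing store already purged (retained == 0).
 *
 *	int retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (!retained)
 *		... buffer contents are gone, caller must reinitialize ...
 */
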
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	if (op & MSM_PREP_BOOST) {
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				      ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

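/*
 * Usage sketch (illustrative, not part of this file): CPU access is bracketed
 * by cpu_prep/cpu_fini, here waiting for pending GPU writes before reading
 * back.  "timeout" is a placeholder ktime_t supplied by the caller.
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads buffer contents ...
 *	msm_gem_cpu_fini(obj);
 */
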
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

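/*
 * Usage sketch (illustrative, not part of this file): msm_gem_dumb_create()
 * earlier in this file is one caller; an ioctl handler follows the same
 * pattern, with the handle returned to userspace holding the only reference.
 * "size" and "file" are whatever the caller already has in hand.
 *
 *	uint32_t handle;
 *	ret = msm_gem_new_handle(dev, file, size, MSM_BO_WC, &handle, "mybo");
 */
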
static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	enum drm_gem_object_status status = 0;

	if (msm_obj->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;

	if (msm_obj->madv == MSM_MADV_DONTNEED)
		status |= DRM_GEM_OBJECT_PURGEABLE;

	return status;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.status = msm_gem_status,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);
		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

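/*
 * Usage sketch (illustrative, not part of this file): msm_gem_kernel_new()
 * and msm_gem_kernel_put() are the convenience pair for small kernel-internal
 * buffers.  "dev" and "aspace" are whatever the caller already owns.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
 *				       &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */
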
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}