drivers/gpu/drm/v3d/v3d_bo.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */
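/* A rough sketch (not compiled here) of how userspace typically drives
 * the ioctls below, assuming the uapi wrappers from
 * include/uapi/drm/v3d_drm.h:
 *
 *	struct drm_v3d_create_bo create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *
 *	struct drm_v3d_mmap_bo map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 *
 * create.offset is the BO's address in the GPU's virtual address
 * space; map.offset is the fake offset to pass to mmap() on the DRM fd.
 */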

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO,
 * and maps it for DMA.
 */
static int
v3d_bo_get_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;
        struct drm_device *dev = obj->dev;
        int npages = obj->size >> PAGE_SHIFT;
        int ret = 0;

        mutex_lock(&bo->lock);
        if (bo->pages_refcount++ != 0)
                goto unlock;

        if (!obj->import_attach) {
                bo->pages = drm_gem_get_pages(obj);
                if (IS_ERR(bo->pages)) {
                        ret = PTR_ERR(bo->pages);
                        goto unlock;
                }

                bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
                if (IS_ERR(bo->sgt)) {
                        ret = PTR_ERR(bo->sgt);
                        goto put_pages;
                }

                /* Map the pages for use by the GPU. */
                dma_map_sg(dev->dev, bo->sgt->sgl,
                           bo->sgt->nents, DMA_BIDIRECTIONAL);
        } else {
                bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
                if (!bo->pages) {
                        /* No shmem pages to put back on the import path. */
                        ret = -ENOMEM;
                        goto unlock;
                }

                drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
                                                 NULL, npages);

                /* Note that dma-bufs come in mapped. */
        }

        mutex_unlock(&bo->lock);

        return 0;

put_pages:
        drm_gem_put_pages(obj, bo->pages, true, true);
        bo->pages = NULL;
unlock:
        bo->pages_refcount--;
        mutex_unlock(&bo->lock);
        return ret;
}

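/* Drops a reference on the BO's backing pages, unmapping and freeing
 * them once the last reference is gone.
 */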
static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;

        mutex_lock(&bo->lock);
        if (--bo->pages_refcount == 0) {
                if (!obj->import_attach) {
                        dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
                                     bo->sgt->nents, DMA_BIDIRECTIONAL);
                        sg_free_table(bo->sgt);
                        kfree(bo->sgt);
                        drm_gem_put_pages(obj, bo->pages, true, true);
                } else {
                        kfree(bo->pages);
                }
        }
        mutex_unlock(&bo->lock);
}

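/* Allocates the BO struct and its GEM object, and reserves a region of
 * the GPU's virtual address space for it.  Backing pages are allocated
 * separately by v3d_bo_get_pages().
 */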
static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
                                           size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        int ret;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);
        obj = &bo->base;

        INIT_LIST_HEAD(&bo->vmas);
        INIT_LIST_HEAD(&bo->unref_head);
        mutex_init(&bo->lock);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto free_bo;

        spin_lock(&v3d->mm_lock);
        ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
                                         obj->size >> PAGE_SHIFT,
                                         GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
        spin_unlock(&v3d->mm_lock);
        if (ret)
                goto free_obj;

        return bo;

free_obj:
        drm_gem_object_release(obj);
free_bo:
        kfree(bo);
        return ERR_PTR(ret);
}

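/* Allocates a new BO backed by shmem pages, maps it into the GPU's
 * page tables, and updates the allocation stats.
 */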
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
                             size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        int ret;

        bo = v3d_bo_create_struct(dev, unaligned_size);
        if (IS_ERR(bo))
                return bo;
        obj = &bo->base;

        bo->resv = &bo->_resv;
        reservation_object_init(bo->resv);

        ret = v3d_bo_get_pages(bo);
        if (ret)
                goto free_mm;

        v3d_mmu_insert_ptes(bo);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated++;
        v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        return bo;

free_mm:
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        drm_gem_object_release(obj);
        kfree(bo);
        return ERR_PTR(ret);
}

/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
        struct v3d_dev *v3d = to_v3d_dev(obj->dev);
        struct v3d_bo *bo = to_v3d_bo(obj);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated--;
        v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        reservation_object_fini(&bo->_resv);

        v3d_bo_put_pages(bo);

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, bo->sgt);

        v3d_mmu_remove_ptes(bo);
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        mutex_destroy(&bo->lock);

        drm_gem_object_release(obj);
        kfree(bo);
}

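/* Returns the reservation object to use for implicit synchronization:
 * ours for native BOs, the exporter's for imported dma-bufs.
 */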
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);

        return bo->resv;
}

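/* Our BOs are faulted in as individual shmem pages, so mark the VMA
 * mixed-map and write-combined.
 */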
static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}

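/* Fault handler for mmapped BOs: inserts the backing page for the
 * faulting address and maps kernel errors to VM fault codes.
 */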
int v3d_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct v3d_bo *bo = to_v3d_bo(obj);
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        pfn = page_to_pfn(bo->pages[pgoff]);

        ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

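/* mmap() entry point for the DRM fd: let the GEM core set up the VMA,
 * then apply our VMA flags.
 */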
int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return 0;
}

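/* mmap() of a PRIME-exported BO: same VMA setup, but against the GEM
 * object directly rather than a fake offset on the DRM fd.
 */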
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return 0;
}

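/* Builds a scatter/gather table of the BO's pages for PRIME export. */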
struct sg_table *
v3d_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);
        int npages = obj->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(bo->pages, npages);
}

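/* Wraps an imported dma-buf's scatter/gather table in a new BO and maps
 * it into the GPU's page tables.
 */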
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sgt)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        int ret;

        bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
        if (IS_ERR(bo))
                return ERR_CAST(bo);
        obj = &bo->base;

        bo->resv = attach->dmabuf->resv;

        bo->sgt = sgt;
        ret = v3d_bo_get_pages(bo);
        if (ret) {
                /* Don't map PTEs for a BO whose pages array couldn't
                 * be built; undo the address space reservation instead.
                 */
                spin_lock(&v3d->mm_lock);
                drm_mm_remove_node(&bo->node);
                spin_unlock(&v3d->mm_lock);
                drm_gem_object_release(obj);
                kfree(bo);
                return ERR_PTR(ret);
        }

        v3d_mmu_insert_ptes(bo);

        return obj;
}

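/* Handles the CREATE_BO ioctl: allocates a BO and returns a handle and
 * the BO's address in the GPU's virtual address space.
 */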
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_v3d_create_bo *args = data;
        struct v3d_bo *bo = NULL;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown create_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        args->offset = bo->node.start << PAGE_SHIFT;

        ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
        drm_gem_object_put_unlocked(&bo->base);

        return ret;
}

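/* Handles the MMAP_BO ioctl: returns the fake offset at which userspace
 * can mmap() the BO through the DRM fd.
 */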
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_v3d_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

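/* Handles the GET_BO_OFFSET ioctl: returns the BO's address in the
 * GPU's virtual address space.
 */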
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_v3d_get_bo_offset *args = data;
        struct drm_gem_object *gem_obj;
        struct v3d_bo *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_v3d_bo(gem_obj);

        args->offset = bo->node.start << PAGE_SHIFT;

        drm_gem_object_put_unlocked(gem_obj);
        return 0;
}