/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Jackson <ajax@redhat.com>
 *      Ben Widawsky <ben@bwidawsk.net>
 */

/*
 * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

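/*
 * Illustrative sketch only (not part of the driver): userspace typically
 * drives vgem through the generic dumb-buffer and PRIME ioctls.  Assuming
 * vgem is the device behind /dev/dri/card0, a client might do:
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      struct drm_mode_create_dumb create = {
 *              .width = 1024, .height = 768, .bpp = 32,
 *      };
 *      ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *      // create.handle can now be mapped via DRM_IOCTL_MODE_MAP_DUMB and
 *      // mmap(), or exported with DRM_IOCTL_PRIME_HANDLE_TO_FD for sharing.
 *
 * These paths land in vgem_gem_dumb_create()/vgem_gem_dumb_map() and the
 * prime hooks below.
 */
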
#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "vgem_drv.h"

#define DRIVER_NAME     "vgem"
#define DRIVER_DESC     "Virtual GEM provider"
#define DRIVER_DATE     "20120112"
#define DRIVER_MAJOR    1
#define DRIVER_MINOR    0

static struct vgem_device {
        struct drm_device drm;
        struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

        kvfree(vgem_obj->pages);

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, vgem_obj->table);

        drm_gem_object_release(obj);
        kfree(vgem_obj);
}

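/*
 * Fault handler for user mmaps of a vgem object: pages of a dma-buf import
 * are already pinned in obj->pages and can be returned directly; otherwise
 * the backing shmem page is looked up (and allocated on demand) per fault.
 */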
static int vgem_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
        int ret;
        loff_t num_pages;
        pgoff_t page_offset;
        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

        num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

        /* page_offset == num_pages is already one past the end */
        if (page_offset >= num_pages)
                return VM_FAULT_SIGBUS;

        if (obj->pages) {
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        } else {
                struct page *page;

                page = shmem_read_mapping_page(
                                        file_inode(obj->base.filp)->i_mapping,
                                        page_offset);
                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                }
        }
        return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
        .fault = vgem_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
        struct vgem_file *vfile;
        int ret;

        vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
        if (!vfile)
                return -ENOMEM;

        file->driver_priv = vfile;

        ret = vgem_fence_open(vfile);
        if (ret) {
                kfree(vfile);
                return ret;
        }

        return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct vgem_file *vfile = file->driver_priv;

        vgem_fence_close(vfile);
        kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
                                                unsigned long size)
{
        struct drm_vgem_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
        drm_gem_object_release(&obj->base);
        kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
                                              struct drm_file *file,
                                              unsigned int *handle,
                                              unsigned long size)
{
        struct drm_vgem_gem_object *obj;
        int ret;

        obj = __vgem_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        /*
         * The handle (if created) now holds a reference, so drop ours.  On
         * failure this drop already frees the object; freeing it again via
         * __vgem_gem_destroy() would be a double free.
         */
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ERR_PTR(ret);

        return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                                struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gem_object;
        u64 pitch, size;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = args->height * pitch;
        if (size == 0)
                return -EINVAL;

        gem_object = vgem_gem_create(dev, file, &args->handle, size);
        if (IS_ERR(gem_object))
                return PTR_ERR(gem_object);

        args->size = gem_object->size;
        args->pitch = pitch;

        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

        return 0;
}

static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
                             uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        if (!obj->filp) {
                ret = -EINVAL;
                goto unref;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto unref;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
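
/*
 * Illustrative sketch only: a client attaches a fence to a buffer and later
 * signals it through the vgem-specific ioctls (uapi in vgem_drm.h), e.g.:
 *
 *      struct drm_vgem_fence_attach attach = {
 *              .handle = handle,
 *              .flags = VGEM_FENCE_WRITE,
 *      };
 *      ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);
 *      ...
 *      struct drm_vgem_fence_signal signal = { .fence = attach.out_fence };
 *      ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
 */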

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long flags = vma->vm_flags;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /* Keep the WC mapping set up by drm_gem_mmap(), but our pages
         * are ordinary and not special.
         */
        vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}

static const struct file_operations vgem_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .mmap           = vgem_mmap,
        .poll           = drm_poll,
        .read           = drm_read,
        .unlocked_ioctl = drm_ioctl,
        .release        = drm_release,
};

static int vgem_prime_pin(struct drm_gem_object *obj)
{
        long n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;

        /* Flush the object from the CPU cache so that importers can rely
         * on coherent indirect access via the exported dma-address.
         */
        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        drm_clflush_pages(pages, n_pages);
        drm_gem_put_pages(obj, pages, true, false);

        return 0;
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct sg_table *st;
        struct page **pages;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return ERR_CAST(pages);

        st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
        drm_gem_put_pages(obj, pages, false, false);

        return st;
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg)
{
        struct drm_vgem_gem_object *obj;
        int npages;

        obj = __vgem_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

        obj->table = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                __vgem_gem_destroy(obj);
                return ERR_PTR(-ENOMEM);
        }
        drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
                                        npages);
        return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
        long n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;
        void *addr;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
        drm_gem_put_pages(obj, pages, false, false);

        return addr;
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        vunmap(vaddr);
}

static int vgem_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        int ret;

        if (obj->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->filp)
                return -ENODEV;

        ret = call_mmap(obj->filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->filp);
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        return 0;
}

static void vgem_release(struct drm_device *dev)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        platform_device_unregister(vgem->platform);
        drm_dev_fini(&vgem->drm);

        kfree(vgem);
}

static struct drm_driver vgem_driver = {
        .driver_features                = DRIVER_GEM | DRIVER_PRIME,
        .release                        = vgem_release,
        .open                           = vgem_open,
        .postclose                      = vgem_postclose,
        .gem_free_object_unlocked       = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .num_ioctls                     = ARRAY_SIZE(vgem_ioctls),
        .fops                           = &vgem_driver_fops,

        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,

        .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
        .gem_prime_pin                  = vgem_prime_pin,
        .gem_prime_import               = vgem_prime_import,
        .gem_prime_export               = drm_gem_prime_export,
        .gem_prime_import_sg_table      = vgem_prime_import_sg_table,
        .gem_prime_get_sg_table         = vgem_prime_get_sg_table,
        .gem_prime_vmap                 = vgem_prime_vmap,
        .gem_prime_vunmap               = vgem_prime_vunmap,
        .gem_prime_mmap                 = vgem_prime_mmap,

        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
        .major  = DRIVER_MAJOR,
        .minor  = DRIVER_MINOR,
};

static int __init vgem_init(void)
{
        int ret;

        vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
        if (!vgem_device)
                return -ENOMEM;

        ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
        if (ret)
                goto out_free;

        vgem_device->platform =
                platform_device_register_simple("vgem", -1, NULL, 0);
        if (IS_ERR(vgem_device->platform)) {
                ret = PTR_ERR(vgem_device->platform);
                goto out_fini;
        }

        dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
                                     DMA_BIT_MASK(64));

        /* Final step: expose the device/driver to userspace */
        ret = drm_dev_register(&vgem_device->drm, 0);
        if (ret)
                goto out_unregister;

        return 0;

out_unregister:
        platform_device_unregister(vgem_device->platform);
out_fini:
        drm_dev_fini(&vgem_device->drm);
out_free:
        kfree(vgem_device);
        return ret;
}

static void __exit vgem_exit(void)
{
        drm_dev_unregister(&vgem_device->drm);
        drm_dev_unref(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");