drm/vkms: Introduce GEM object functions
// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
#include <drm/drm_prime.h>

#include "vkms_drv.h"

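/*
 * Virtual memory operations for mmap'ed VKMS GEM objects: faults are
 * resolved by vkms_gem_fault() below, while open/close use the generic
 * DRM helpers that maintain the object's reference count across VMA
 * duplication and teardown.
 */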
static const struct vm_operations_struct vkms_gem_vm_ops = {
        .fault = vkms_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

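/*
 * Per-object function table. Setting this in gem.funcs lets the DRM core
 * find the free and mmap callbacks on the object itself instead of going
 * through the drm_driver structure.
 */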
static const struct drm_gem_object_funcs vkms_gem_object_funcs = {
        .free = vkms_gem_free_object,
        .vm_ops = &vkms_gem_vm_ops,
};

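/*
 * __vkms_gem_create - allocate and initialize a shmem-backed GEM object
 *
 * Rounds @size up to a page multiple and initializes the object with a
 * shmem file as backing storage. Returns the new object or an ERR_PTR.
 */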
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
                                                 u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        obj->gem.funcs = &vkms_gem_object_funcs;

        size = roundup(size, PAGE_SIZE);
        ret = drm_gem_object_init(dev, &obj->gem, size);
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        mutex_init(&obj->pages_lock);

        return obj;
}

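/*
 * vkms_gem_free_object - final release of a VKMS GEM object
 *
 * Called once the last reference is dropped. The backing pages and any
 * kernel mapping must already be gone (every vmap balanced by a vunmap),
 * hence the WARN_ONs.
 */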
void vkms_gem_free_object(struct drm_gem_object *obj)
{
        struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
                                                   gem);

        WARN_ON(gem->pages);
        WARN_ON(gem->vaddr);

        mutex_destroy(&gem->pages_lock);
        drm_gem_object_release(obj);
        kfree(gem);
}

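/*
 * vkms_gem_fault - page fault handler for mmap'ed objects
 *
 * Two-stage lookup: if the backing pages are already pinned (the object
 * is vmap'ed), hand out the cached page; otherwise fall back to reading
 * the page from the object's shmem mapping and translate any error into
 * the matching VM_FAULT_* code.
 */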
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct vkms_gem_object *obj = vma->vm_private_data;
        unsigned long vaddr = vmf->address;
        pgoff_t page_offset;
        loff_t num_pages;
        vm_fault_t ret = VM_FAULT_SIGBUS;

        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

        /* Valid offsets are 0 .. num_pages - 1. */
        if (page_offset >= num_pages)
                return VM_FAULT_SIGBUS;

        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        }
        mutex_unlock(&obj->pages_lock);
        if (ret) {
                struct page *page;
                struct address_space *mapping;

                mapping = file_inode(obj->gem.filp)->i_mapping;
                page = shmem_read_mapping_page(mapping, page_offset);

                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else {
                        switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                        }
                }
        }
        return ret;
}

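/*
 * vkms_gem_create - create an object and publish a handle for it
 *
 * On success the handle owns one reference and the returned pointer
 * another, so the caller must drop its reference with
 * drm_gem_object_put() when done.
 */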
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
                                              struct drm_file *file,
                                              u32 *handle,
                                              u64 size)
{
        struct vkms_gem_object *obj;
        int ret;

        if (!file || !dev || !handle)
                return ERR_PTR(-EINVAL);

        obj = __vkms_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->gem, handle);
        if (ret) {
                /* Drop the allocation reference so the object is freed. */
                drm_gem_object_put(&obj->gem);
                return ERR_PTR(ret);
        }

        return &obj->gem;
}

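/*
 * vkms_dumb_create - DRM_IOCTL_MODE_CREATE_DUMB implementation
 *
 * The pitch is the width times the bytes per pixel, and the size is the
 * pitch times the height. For example, a 1024x768 XRGB8888 buffer
 * (bpp = 32) gives pitch = 1024 * 4 = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes; __vkms_gem_create() rounds sizes
 * that are not page aligned up to the next page.
 */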
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gem_obj;
        u64 pitch, size;

        if (!args || !dev || !file)
                return -EINVAL;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = pitch * args->height;

        if (!size)
                return -EINVAL;

        gem_obj = vkms_gem_create(dev, file, &args->handle, size);
        if (IS_ERR(gem_obj))
                return PTR_ERR(gem_obj);

        args->size = gem_obj->size;
        args->pitch = pitch;

        /* The handle created above keeps the object alive. */
        drm_gem_object_put(gem_obj);

        DRM_DEBUG_DRIVER("Created object of size %llu\n", size);

        return 0;
}

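/*
 * _get_pages - pin the object's backing pages, allocating them on demand
 *
 * The cmpxchg() publishes the freshly pinned page array only if no
 * concurrent caller got there first; the loser releases its own array
 * and everyone returns the one that won.
 */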
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
        struct drm_gem_object *gem_obj = &vkms_obj->gem;

        if (!vkms_obj->pages) {
                struct page **pages = drm_gem_get_pages(gem_obj);

                if (IS_ERR(pages))
                        return pages;

                if (cmpxchg(&vkms_obj->pages, NULL, pages))
                        drm_gem_put_pages(gem_obj, pages, false, true);
        }

        return vkms_obj->pages;
}

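/*
 * vkms_gem_vunmap - drop one kernel mapping reference
 *
 * Counterpart to vkms_gem_vmap(). Only the last unmap tears down the
 * vmap()ed address and unpins the backing pages; an unbalanced call
 * (vmap_count already zero) is a no-op apart from the WARN_ONs.
 */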
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

        mutex_lock(&vkms_obj->pages_lock);
        if (vkms_obj->vmap_count < 1) {
                WARN_ON(vkms_obj->vaddr);
                WARN_ON(vkms_obj->pages);
                mutex_unlock(&vkms_obj->pages_lock);
                return;
        }

        vkms_obj->vmap_count--;

        if (vkms_obj->vmap_count == 0) {
                vunmap(vkms_obj->vaddr);
                vkms_obj->vaddr = NULL;
                drm_gem_put_pages(obj, vkms_obj->pages, false, true);
                vkms_obj->pages = NULL;
        }

        mutex_unlock(&vkms_obj->pages_lock);
}

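/*
 * vkms_gem_vmap - map the object contiguously into kernel address space
 *
 * Refcounted: the first call pins the pages and vmap()s them, later
 * calls only bump vmap_count; each successful call must be balanced by
 * vkms_gem_vunmap(). Returns 0 or a negative errno. An illustrative
 * (not verbatim) caller sketch:
 *
 *      if (!vkms_gem_vmap(&vkms_obj->gem)) {
 *              memset(vkms_obj->vaddr, 0, vkms_obj->gem.size);
 *              vkms_gem_vunmap(&vkms_obj->gem);
 *      }
 */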
int vkms_gem_vmap(struct drm_gem_object *obj)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
        int ret = 0;

        mutex_lock(&vkms_obj->pages_lock);

        if (!vkms_obj->vaddr) {
                unsigned int n_pages = obj->size >> PAGE_SHIFT;
                struct page **pages = _get_pages(vkms_obj);

                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
                if (!vkms_obj->vaddr)
                        goto err_vmap;
        }

        vkms_obj->vmap_count++;
        goto out;

err_vmap:
        ret = -ENOMEM;
        drm_gem_put_pages(obj, vkms_obj->pages, false, true);
        vkms_obj->pages = NULL;
out:
        mutex_unlock(&vkms_obj->pages_lock);
        return ret;
}

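/*
 * vkms_prime_import_sg_table - wrap an imported dma-buf in a GEM object
 *
 * Builds a page array from the exporter's scatter-gather table so the
 * fault handler and vmap paths can treat imported buffers like native
 * shmem-backed ones.
 */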
struct drm_gem_object *
vkms_prime_import_sg_table(struct drm_device *dev,
                           struct dma_buf_attachment *attach,
                           struct sg_table *sg)
{
        struct vkms_gem_object *obj;
        int npages;

        obj = __vkms_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
        DRM_DEBUG_PRIME("Importing %d pages\n", npages);

        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                vkms_gem_free_object(&obj->gem);
                return ERR_PTR(-ENOMEM);
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
        return &obj->gem;
}