1 // SPDX-License-Identifier: GPL-2.0+
3 #include <linux/dma-buf.h>
4 #include <linux/shmem_fs.h>
5 #include <linux/vmalloc.h>
6 #include <drm/drm_prime.h>
/*
 * VM operations for mmap'd vkms GEM ranges: a driver-specific fault
 * handler plus the stock DRM helpers that maintain the GEM object
 * refcount across VMA open/close.
 */
10 static const struct vm_operations_struct vkms_gem_vm_ops = {
11 .fault = vkms_gem_fault,
12 .open = drm_gem_vm_open,
13 .close = drm_gem_vm_close,
/*
 * Per-object GEM callbacks: free routine and the vm_ops table used when
 * the object is mmap'd.
 */
16 static const struct drm_gem_object_funcs vkms_gem_object_funcs = {
17 .free = vkms_gem_free_object,
18 .vm_ops = &vkms_gem_vm_ops,
/*
 * Allocate and initialise a vkms_gem_object backing @size bytes
 * (rounded up to whole pages).
 *
 * Returns the new object on success or an ERR_PTR on failure
 * (-ENOMEM when the allocation fails).
 */
21 static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
24 struct vkms_gem_object *obj;
27 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
29 return ERR_PTR(-ENOMEM);
/* Install our callbacks before registering with the GEM core. */
31 obj->gem.funcs = &vkms_gem_object_funcs;
33 size = roundup(size, PAGE_SIZE);
34 ret = drm_gem_object_init(dev, &obj->gem, size);
/* Serialises access to obj->pages / obj->vaddr (see fault and vmap paths). */
40 mutex_init(&obj->pages_lock);
/*
 * GEM .free callback: tear down the vkms-specific state and release the
 * underlying GEM object. Called when the object's last reference drops.
 */
45 void vkms_gem_free_object(struct drm_gem_object *obj)
47 struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
53 mutex_destroy(&gem->pages_lock);
54 drm_gem_object_release(obj);
/*
 * Page-fault handler for mmap'd vkms GEM objects.
 *
 * If the object's page array is already populated, hand back the page at
 * the faulting offset (with an extra reference for the core). Otherwise
 * fall back to pulling the page from the object's shmem mapping and map
 * the shmem error codes onto vm_fault_t values.
 */
58 vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
60 struct vm_area_struct *vma = vmf->vma;
61 struct vkms_gem_object *obj = vma->vm_private_data;
62 unsigned long vaddr = vmf->address;
65 vm_fault_t ret = VM_FAULT_SIGBUS;
/* Offset of the faulting page within the object. */
67 page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
68 num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
/*
 * NOTE(review): valid offsets are 0..num_pages-1, so this bound check
 * looks off by one ('>' lets page_offset == num_pages through; '>='
 * would reject it). The shmem fallback below would still fail for such
 * an offset, so this is likely benign — confirm before changing.
 */
70 if (page_offset > num_pages)
71 return VM_FAULT_SIGBUS;
/* pages_lock guards obj->pages against the vmap/vunmap paths. */
73 mutex_lock(&obj->pages_lock);
/* Fast path: pages already pinned — take a reference and return it. */
75 get_page(obj->pages[page_offset]);
76 vmf->page = obj->pages[page_offset];
79 mutex_unlock(&obj->pages_lock);
/* Slow path: read the page in through the object's shmem mapping. */
82 struct address_space *mapping;
84 mapping = file_inode(obj->gem.filp)->i_mapping;
85 page = shmem_read_mapping_page(mapping, page_offset);
/* Translate shmem errors to fault codes; unknown errors get SIGBUS. */
91 switch (PTR_ERR(page)) {
101 ret = VM_FAULT_SIGBUS;
104 WARN_ON(PTR_ERR(page));
105 ret = VM_FAULT_SIGBUS;
/*
 * Create a vkms GEM object of @size bytes and register a userspace
 * handle for it in @file, returned through @handle.
 *
 * Returns the GEM object on success or an ERR_PTR on failure
 * (-EINVAL for missing arguments, or the error from object creation).
 */
113 static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
114 struct drm_file *file,
118 struct vkms_gem_object *obj;
121 if (!file || !dev || !handle)
122 return ERR_PTR(-EINVAL);
124 obj = __vkms_gem_create(dev, size);
126 return ERR_CAST(obj);
/* Publish the object to userspace via a handle in @file. */
128 ret = drm_gem_handle_create(file, &obj->gem, handle);
/*
 * DRM dumb-buffer create ioctl entry point: compute pitch and size from
 * the requested width/height/bpp, create a backing GEM object, and fill
 * in args->handle / args->size for userspace.
 *
 * Returns 0 on success or a negative error code.
 */
135 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
136 struct drm_mode_create_dumb *args)
138 struct drm_gem_object *gem_obj;
141 if (!args || !dev || !file)
/*
 * Bytes per row = width * bytes-per-pixel (bpp rounded up to whole
 * bytes). NOTE(review): pitch * height is not overflow-checked here —
 * presumably bounded elsewhere; verify against the ioctl caller.
 */
144 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
145 size = pitch * args->height;
150 gem_obj = vkms_gem_create(dev, file, &args->handle, size);
152 return PTR_ERR(gem_obj);
/* Report the actual (page-rounded) object size back to userspace. */
154 args->size = gem_obj->size;
/* Drop our local reference; the userspace handle keeps the object alive. */
157 drm_gem_object_put(gem_obj);
159 DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
/*
 * Return the object's pinned page array, populating it on first use.
 *
 * Uses a lockless publish: if a concurrent caller won the cmpxchg race
 * and installed its page array first, ours is released and theirs is
 * returned. Propagates ERR_PTR from drm_gem_get_pages() on failure.
 */
164 static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
166 struct drm_gem_object *gem_obj = &vkms_obj->gem;
168 if (!vkms_obj->pages) {
169 struct page **pages = drm_gem_get_pages(gem_obj);
/* Lost the race — another thread already published its array. */
174 if (cmpxchg(&vkms_obj->pages, NULL, pages))
175 drm_gem_put_pages(gem_obj, pages, false, true);
178 return vkms_obj->pages;
/*
 * Drop one kernel-vmap reference on the object. When the count reaches
 * zero, tear down the kernel mapping and release the pinned pages.
 * A call without a matching vkms_gem_vmap() triggers the WARNs below.
 */
181 void vkms_gem_vunmap(struct drm_gem_object *obj)
183 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
185 mutex_lock(&vkms_obj->pages_lock);
/* Unbalanced vunmap: nothing should be mapped or pinned at this point. */
186 if (vkms_obj->vmap_count < 1) {
187 WARN_ON(vkms_obj->vaddr);
188 WARN_ON(vkms_obj->pages);
189 mutex_unlock(&vkms_obj->pages_lock);
193 vkms_obj->vmap_count--;
/* Last user gone: unmap and unpin. */
195 if (vkms_obj->vmap_count == 0) {
196 vunmap(vkms_obj->vaddr);
197 vkms_obj->vaddr = NULL;
198 drm_gem_put_pages(obj, vkms_obj->pages, false, true);
199 vkms_obj->pages = NULL;
202 mutex_unlock(&vkms_obj->pages_lock);
/*
 * Map the object's pages into a contiguous kernel virtual range
 * (obj->vaddr), refcounted via vmap_count so nested callers share one
 * mapping. Pair each successful call with vkms_gem_vunmap().
 *
 * Returns 0 on success or a negative error code.
 */
205 int vkms_gem_vmap(struct drm_gem_object *obj)
207 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
210 mutex_lock(&vkms_obj->pages_lock);
/* First mapper: pin the pages and build the kernel mapping. */
212 if (!vkms_obj->vaddr) {
213 unsigned int n_pages = obj->size >> PAGE_SHIFT;
214 struct page **pages = _get_pages(vkms_obj);
217 ret = PTR_ERR(pages);
221 vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
222 if (!vkms_obj->vaddr)
226 vkms_obj->vmap_count++;
/*
 * NOTE(review): the two lines below appear to be the vmap-failure
 * unwind path (release the pinned pages) — presumably reached via a
 * goto label not visible here; confirm against the full source.
 */
231 drm_gem_put_pages(obj, vkms_obj->pages, false, true);
232 vkms_obj->pages = NULL;
234 mutex_unlock(&vkms_obj->pages_lock);
238 struct drm_gem_object *
239 vkms_prime_import_sg_table(struct drm_device *dev,
240 struct dma_buf_attachment *attach,
243 struct vkms_gem_object *obj;
246 obj = __vkms_gem_create(dev, attach->dmabuf->size);
248 return ERR_CAST(obj);
250 npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
251 DRM_DEBUG_PRIME("Importing %d pages\n", npages);
253 obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
255 vkms_gem_free_object(&obj->gem);
256 return ERR_PTR(-ENOMEM);
259 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);