// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *      Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>


struct cma_heap {
        struct dma_heap *heap;
        struct cma *cma;
};

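/*
 * Per-buffer state shared by all users of one exported dma-buf: the backing
 * CMA allocation, a flat page array used for mmap/vmap and scatterlist
 * construction, the list of device attachments, and the kernel-mapping
 * refcount.
 */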
struct cma_heap_buffer {
        struct cma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct page *cma_pages;
        struct page **pages;
        pgoff_t pagecount;
        int vmap_cnt;
        void *vaddr;
};

struct dma_heap_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
        bool mapped;
};

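/*
 * Attach a device: build a scatterlist over the buffer's pages and link the
 * attachment into the buffer so CPU-access syncs can reach every mapped
 * device.
 */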
static int cma_heap_attach(struct dma_buf *dmabuf,
                           struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
                                        buffer->pagecount, 0,
                                        buffer->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

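/* Detach a device: unlink the attachment and free its scatterlist. */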
static void cma_heap_detach(struct dma_buf *dmabuf,
                            struct dma_buf_attachment *attachment)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}

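/*
 * Map the attachment's scatterlist for DMA on the attaching device and mark
 * it live so begin/end_cpu_access will include it in cache maintenance.
 */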
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;
        struct sg_table *table = &a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                return ERR_PTR(-ENOMEM);
        a->mapped = true;
        return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

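/*
 * Hand the buffer to the CPU: invalidate any kernel vmap range and sync the
 * scatterlists of every currently mapped attachment for CPU access.
 */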
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

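/*
 * Hand the buffer back to devices: flush any kernel vmap range and sync the
 * scatterlists of every currently mapped attachment for device access.
 */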
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                           enum dma_data_direction direction)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

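/*
 * Fault handler for userspace mappings: pages are faulted in one at a time
 * from the buffer's page array rather than being inserted up front.
 */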
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct cma_heap_buffer *buffer = vma->vm_private_data;

        if (vmf->pgoff >= buffer->pagecount)
                return VM_FAULT_SIGBUS;

        vmf->page = buffer->pages[vmf->pgoff];
        get_page(vmf->page);

        return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
        .fault = cma_heap_vm_fault,
};

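/*
 * mmap the buffer into userspace: require a shared mapping and defer page
 * insertion to the fault handler above.
 */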
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &dma_heap_vm_ops;
        vma->vm_private_data = buffer;

        return 0;
}

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
        void *vaddr;

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

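/*
 * Map the buffer into the kernel: reuse an existing vmap if one is live,
 * otherwise create it, and count users so vunmap can tear it down last.
 */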
static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;
        int ret = 0;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                dma_buf_map_set_vaddr(map, buffer->vaddr);
                goto out;
        }

        vaddr = cma_heap_do_vmap(buffer);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out;
        }
        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
        mutex_unlock(&buffer->lock);

        return ret;
}

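/* Drop one kernel-mapping reference and vunmap when the last user is gone. */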
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
        dma_buf_map_clear(map);
}

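/*
 * Final release of the dma-buf: warn about (and undo) any leaked kernel
 * mapping, then return the pages to the CMA area and free the bookkeeping.
 */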
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct cma_heap_buffer *buffer = dmabuf->priv;
        struct cma_heap *cma_heap = buffer->heap;

        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }

        /* free page list */
        kfree(buffer->pages);
        /* release memory */
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
        .attach = cma_heap_attach,
        .detach = cma_heap_detach,
        .map_dma_buf = cma_heap_map_dma_buf,
        .unmap_dma_buf = cma_heap_unmap_dma_buf,
        .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
        .mmap = cma_heap_mmap,
        .vmap = cma_heap_vmap,
        .vunmap = cma_heap_vunmap,
        .release = cma_heap_dma_buf_release,
};

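/*
 * Allocate a buffer from the heap's CMA area: grab a physically contiguous
 * run of pages, zero it, build the flat page array, and export the result
 * as a dma-buf.
 *
 * Userspace reaches this through the generic dma-heap ioctl interface,
 * roughly as in this sketch (assumes the standard UAPI in
 * include/uapi/linux/dma-heap.h; the heap name depends on the CMA area):
 *
 *      int heap_fd = open("/dev/dma_heap/<cma name>", O_RDWR);
 *      struct dma_heap_allocation_data data = {
 *              .len = buffer_size,
 *              .fd_flags = O_RDWR | O_CLOEXEC,
 *      };
 *      ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *      // data.fd now holds the exported dma-buf file descriptor
 */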
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
                                         unsigned long len,
                                         unsigned long fd_flags,
                                         unsigned long heap_flags)
{
        struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
        struct cma_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        size_t size = PAGE_ALIGN(len);
        pgoff_t pagecount = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct page *cma_pages;
        struct dma_buf *dmabuf;
        int ret = -ENOMEM;
        pgoff_t pg;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->len = size;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
        if (!cma_pages)
                goto free_buffer;

        /* Clear the cma pages */
        if (PageHighMem(cma_pages)) {
                unsigned long nr_clear_pages = pagecount;
                struct page *page = cma_pages;

                while (nr_clear_pages > 0) {
                        void *vaddr = kmap_atomic(page);

                        memset(vaddr, 0, PAGE_SIZE);
                        kunmap_atomic(vaddr);
                        /*
                         * Avoid wasting time zeroing memory if the process
                         * has been killed by SIGKILL.
                         */
                        if (fatal_signal_pending(current))
                                goto free_cma;
                        page++;
                        nr_clear_pages--;
                }
        } else {
                memset(page_address(cma_pages), 0, size);
        }

        buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
        if (!buffer->pages) {
                ret = -ENOMEM;
                goto free_cma;
        }

        for (pg = 0; pg < pagecount; pg++)
                buffer->pages[pg] = &cma_pages[pg];

        buffer->cma_pages = cma_pages;
        buffer->heap = cma_heap;
        buffer->pagecount = pagecount;

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.ops = &cma_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }
        return dmabuf;

free_pages:
        kfree(buffer->pages);
free_cma:
        cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
        kfree(buffer);

        return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
        .allocate = cma_heap_allocate,
};

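/*
 * Register one CMA area as a dma-buf heap; it shows up to userspace as
 * /dev/dma_heap/<cma name>.
 */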
static int __add_cma_heap(struct cma *cma, void *data)
{
        struct cma_heap *cma_heap;
        struct dma_heap_export_info exp_info;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return -ENOMEM;
        cma_heap->cma = cma;

        exp_info.name = cma_get_name(cma);
        exp_info.ops = &cma_heap_ops;
        exp_info.priv = cma_heap;

        cma_heap->heap = dma_heap_add(&exp_info);
        if (IS_ERR(cma_heap->heap)) {
                int ret = PTR_ERR(cma_heap->heap);

                kfree(cma_heap);
                return ret;
        }

        return 0;
}

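/* Only the system's default CMA area is exported as a heap. */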
static int add_default_cma_heap(void)
{
        struct cma *default_cma = dev_get_cma_area(NULL);
        int ret = 0;

        if (default_cma)
                ret = __add_cma_heap(default_cma, NULL);

        return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");