// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        buffer->dev = dev;
        buffer->size = len;

        ret = heap->ops->allocate(heap, buffer, len, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        spin_lock(&heap->stat_lock);
        heap->num_of_buffers++;
        heap->num_of_alloc_bytes += len;
        if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
                heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
        spin_unlock(&heap->stat_lock);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

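/*
 * Release a buffer's backing memory: drop any leftover kernel mapping,
 * hand the pages back to the owning heap, update the heap's allocation
 * statistics and free the ion_buffer itself.
 */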
void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
                             __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->free(buffer);
        spin_lock(&buffer->heap->stat_lock);
        buffer->heap->num_of_buffers--;
        buffer->heap->num_of_alloc_bytes -= buffer->size;
        spin_unlock(&buffer->heap->stat_lock);

        kfree(buffer);
}

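/*
 * Remove the buffer from the device's rbtree of outstanding buffers,
 * then either queue it on the heap's freelist (for heaps that use
 * ION_HEAP_FLAG_DEFER_FREE) or destroy it immediately.
 */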
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

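/*
 * Reference-counted kernel mapping of a buffer: the first get maps the
 * buffer through heap->ops->map_kernel(), the last put unmaps it.
 * Both helpers are called with buffer->lock held.
 */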
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

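/*
 * Duplicate a buffer's sg_table so that each dma-buf attachment gets its
 * own copy to map: the page links and lengths are copied, while any
 * existing DMA addresses are cleared.
 */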
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static void free_duped_table(struct sg_table *table)
{
        sg_free_table(table);
        kfree(table);
}

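/*
 * Per-attachment bookkeeping: the attaching device, its private copy of
 * the buffer's sg_table and a link into the buffer's attachment list.
 */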
struct ion_dma_buf_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
};

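/*
 * dma_buf attach/detach callbacks: allocate per-attachment state, give
 * the attaching device its own copy of the sg_table and track it on the
 * buffer's attachment list so the CPU-access callbacks can sync it.
 */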
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a;
        struct sg_table *table;
        struct ion_buffer *buffer = dmabuf->priv;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
                                struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);
        free_duped_table(a->table);

        kfree(a);
}

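/*
 * dma_buf map/unmap callbacks: DMA-map the attachment's private copy of
 * the sg_table for the attaching device and unmap it again when the
 * device is done.  The table itself is only freed at detach time.
 */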
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

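/*
 * dma_buf mmap callback: delegate to the heap's map_user() method,
 * using a write-combined mapping when the buffer was allocated without
 * ION_FLAG_CACHED.
 */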
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        _ion_buffer_destroy(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

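/*
 * CPU access callbacks: take (and later drop) a kernel mapping when the
 * heap provides one, and sync every attachment's sg_table so the CPU
 * and the attached devices see coherent data.
 */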
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
        struct ion_dma_buf_attachment *a;
        int ret = 0;

        /*
         * TODO: Move this elsewhere because we don't always need a vaddr
         */
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
                if (IS_ERR(vaddr)) {
                        ret = PTR_ERR(vaddr);
                        goto unlock;
                }
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                                    direction);
        }

unlock:
        mutex_unlock(&buffer->lock);
        return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        struct ion_dma_buf_attachment *a;

        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .map = ion_dma_buf_kmap,
        .unmap = ion_dma_buf_kunmap,
};

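/*
 * Allocate a buffer from the first heap in @heap_id_mask that can
 * satisfy the request, wrap it in a dma-buf and return a file
 * descriptor for it.  Returns a negative errno on failure.
 */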
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client, and matches
         * the request of the caller, allocate from it.  Repeat until the
         * allocation has succeeded or all heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return -EINVAL;

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

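/*
 * Report the heaps registered with the device to userspace.  With a
 * NULL destination buffer only the heap count is returned; otherwise up
 * to query->cnt ion_heap_data records are copied out.
 */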
int ion_query_heaps(struct ion_heap_query *query)
{
        struct ion_device *dev = internal_dev;
        struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
        int ret = -EINVAL, cnt = 0, max_cnt;
        struct ion_heap *heap;
        struct ion_heap_data hdata;

        memset(&hdata, 0, sizeof(hdata));

        down_read(&dev->lock);
        if (!buffer) {
                query->cnt = dev->heap_cnt;
                ret = 0;
                goto out;
        }

        if (query->cnt <= 0)
                goto out;

        max_cnt = query->cnt;

        plist_for_each_entry(heap, &dev->heaps, node) {
                strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
                hdata.name[sizeof(hdata.name) - 1] = '\0';
                hdata.type = heap->type;
                hdata.heap_id = heap->id;

                if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
                        ret = -EFAULT;
                        goto out;
                }

                cnt++;
                if (cnt >= max_cnt)
                        break;
        }

        query->cnt = cnt;
        ret = 0;
out:
        up_read(&dev->lock);
        return ret;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ion_ioctl,
#endif
};

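/*
 * debugfs hooks for the per-heap "<name>_shrink" file: writing a value
 * shrinks that many objects from the heap (0 means all of them), and
 * reading returns the current object count from the heap's shrinker.
 */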
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

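/*
 * Register a heap with the ion device: initialize its locks and,
 * when requested, deferred freeing and a shrinker, expose its
 * statistics in debugfs, assign it an id and insert it into the
 * device's priority list of heaps.
 */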
void ion_device_add_heap(struct ion_heap *heap)
{
        struct ion_device *dev = internal_dev;
        int ret;
        struct dentry *heap_root;
        char debug_name[64];

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        spin_lock_init(&heap->stat_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
                ret = ion_heap_init_shrinker(heap);
                if (ret)
                        pr_err("%s: Failed to register shrinker\n", __func__);
        }

        heap->dev = dev;
        heap->num_of_buffers = 0;
        heap->num_of_alloc_bytes = 0;
        heap->alloc_bytes_wm = 0;

        heap_root = debugfs_create_dir(heap->name, dev->debug_root);
        debugfs_create_u64("num_of_buffers",
                           0444, heap_root,
                           &heap->num_of_buffers);
        debugfs_create_u64("num_of_alloc_bytes",
                           0444,
                           heap_root,
                           &heap->num_of_alloc_bytes);
        debugfs_create_u64("alloc_bytes_wm",
                           0444,
                           heap_root,
                           &heap->alloc_bytes_wm);

        if (heap->shrinker.count_objects &&
            heap->shrinker.scan_objects) {
                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name,
                                    0644,
                                    heap_root,
                                    heap,
                                    &debug_shrink_fops);
        }

        down_write(&dev->lock);
        heap->id = heap_id++;
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later attempt higher id numbers first
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

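/*
 * Create the global ion misc device ("/dev/ion") and the bookkeeping
 * shared by all heaps.  Runs once at boot via subsys_initcall().
 */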
static int ion_device_create(void)
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return -ENOMEM;

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ret;
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        internal_dev = idev;
        return 0;
}
subsys_initcall(ion_device_create);