// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

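/*
 * Illustrative only, not part of the driver: a lookup over this
 * address-ordered tree would mirror the insert above. Hypothetical
 * sketch (no such ion_buffer_find() helper exists in this file):
 *
 *	struct rb_node *n = dev->buffers.rb_node;
 *
 *	while (n) {
 *		struct ion_buffer *entry =
 *			rb_entry(n, struct ion_buffer, node);
 *
 *		if (buffer < entry)
 *			n = n->rb_left;
 *		else if (buffer > entry)
 *			n = n->rb_right;
 *		else
 *			return entry;
 *	}
 *	return NULL;
 */
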
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

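/*
 * Each dma-buf attachment gets its own copy of the buffer's sg_table
 * (see ion_dma_buf_attach() below): dup_sg_table() duplicates the page
 * links but clears dma_address, so mapping the buffer for one attached
 * device cannot clobber the DMA addresses another device is using.
 */
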
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detatch,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};

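/*
 * Illustrative only: how an in-kernel importer would exercise the ops
 * above through the dma-buf core. Hypothetical sketch; "dev" and "fd"
 * are assumed to come from the importing driver:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
 *	(... program the device with the scatterlist ...)
 *	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, att);
 *	dma_buf_put(dmabuf);
 */
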
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

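/*
 * Illustrative only: a minimal userspace allocation against this
 * driver, assuming the modern ION UAPI (ion_allocation_data and
 * ION_IOC_ALLOC from the uapi ion.h; the ioctl returns a dma-buf fd
 * in data.fd, which can then be mmap()ed via ion_mmap() above):
 *
 *	struct ion_allocation_data data = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *
 *	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) == 0)
 *		mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     data.fd, 0);
 */
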
int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

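/*
 * Illustrative only: userspace typically drives ION_IOC_HEAP_QUERY in
 * two passes, matching the !buffer branch above. Hypothetical sketch:
 *
 *	struct ion_heap_query query = { 0 };
 *
 *	ioctl(fd, ION_IOC_HEAP_QUERY, &query);	(heaps == 0: count only)
 *	query.heaps = (__u64)(uintptr_t)calloc(query.cnt,
 *				sizeof(struct ion_heap_data));
 *	ioctl(fd, ION_IOC_HEAP_QUERY, &query);	(fills the array)
 */
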
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ion_ioctl,
#endif
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

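/*
 * These attributes back the per-heap "<name>_shrink" debugfs file
 * created in ion_device_add_heap() below. Reading it reports the
 * shrinker's current object count; writing N asks the shrinker to scan
 * N objects, with 0 meaning "everything currently counted". Assuming
 * debugfs is mounted at /sys/kernel/debug, usage looks like:
 *
 *	cat /sys/kernel/debug/ion/<heap>/<heap>_shrink
 *	echo 0 > /sys/kernel/debug/ion/<heap>/<heap>_shrink
 */
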
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444, heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444, heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644, heap_root,
				    heap, &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

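/*
 * Illustrative only: a heap driver registers itself by filling in an
 * ion_heap and calling ion_device_add_heap(). Hypothetical sketch
 * (all "my_heap" names are made up for this example):
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *	};
 *
 *	static int __init my_heap_init(void)
 *	{
 *		static struct ion_heap heap = {
 *			.ops = &my_heap_ops,
 *			.type = ION_HEAP_TYPE_CUSTOM,
 *			.name = "my_heap",
 *		};
 *
 *		ion_device_add_heap(&heap);
 *		return 0;
 *	}
 *	device_initcall(my_heap_init);
 */
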
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);