// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"

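/**
 * ion_heap_map_kernel - map a buffer into the kernel's address space
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 *
 * Builds a page array from the buffer's sg_table and maps it with vmap().
 * Cached buffers are mapped with PAGE_KERNEL; uncached buffers are mapped
 * write-combined.  Returns the kernel virtual address on success, NULL if
 * the temporary page array cannot be allocated, or ERR_PTR(-ENOMEM) if
 * vmap() fails.
 */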
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(array_size(npages,
                                                 sizeof(struct page *)));
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

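/**
 * ion_heap_unmap_kernel - undo ion_heap_map_kernel()
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer whose kernel mapping should be torn down
 */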
void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

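/**
 * ion_heap_map_user - map a buffer into a userspace vma
 * @heap:	the heap the buffer was allocated from
 * @buffer:	the buffer to map
 * @vma:	the vma describing the userspace mapping
 *
 * Walks the buffer's scatterlist and maps each contiguous chunk into the
 * vma with remap_pfn_range(), treating vma->vm_pgoff as a page offset into
 * the buffer and clamping the final chunk to the end of the vma.
 * Returns 0 on success or the error from remap_pfn_range().
 */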
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

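/*
 * Temporarily map @num pages with vm_map_ram(), zero them through the
 * mapping, then tear the mapping back down.  Returns -ENOMEM if the
 * transient mapping cannot be set up.
 */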
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

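/*
 * Zero every page referenced by a scatterlist.  Pages are gathered into
 * batches of up to 32 so only a small, bounded virtual range is mapped
 * at any one time.
 */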
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

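/**
 * ion_heap_buffer_zero - zero the contents of a buffer
 * @buffer:	the buffer to zero
 *
 * Picks page protections matching the buffer's cache flags and zeroes
 * every page in its sg_table.
 */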
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

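/**
 * ion_heap_pages_zero - zero a physically contiguous range of pages
 * @page:	first page of the range
 * @size:	length of the range in bytes
 * @pgprot:	page protections to use for the temporary mapping
 *
 * Wraps the range in a single-entry scatterlist and reuses
 * ion_heap_sglist_zero().
 */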
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

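/**
 * ion_heap_freelist_add - queue a buffer for deferred freeing
 * @heap:	the heap the buffer belongs to
 * @buffer:	the buffer to queue
 *
 * Adds the buffer to the heap's free list under free_lock and wakes the
 * deferred-free thread so it can destroy the buffer asynchronously.
 */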
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

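/**
 * ion_heap_freelist_size - total bytes currently on the deferred free list
 * @heap:	the heap to query
 */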
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

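/*
 * Destroy up to @size bytes worth of buffers from the deferred free list
 * (all of them if @size is 0).  free_lock is dropped around each
 * ion_buffer_destroy() call, since destroying a buffer can sleep.  When
 * @skip_pools is set, buffers are flagged with ION_PRIV_FLAG_SHRINKER_FREE
 * so the heap releases their pages instead of recycling them into a pool.
 * Returns the number of bytes drained.
 */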
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

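/**
 * ion_heap_freelist_drain - free buffers from the deferred free list
 * @heap:	the heap to drain
 * @size:	number of bytes to drain, or 0 to drain everything
 */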
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

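/**
 * ion_heap_freelist_shrink - drain the free list on behalf of the shrinker
 * @heap:	the heap to drain
 * @size:	number of bytes to drain, or 0 to drain everything
 *
 * Like ion_heap_freelist_drain(), but marks each buffer so its pages
 * bypass any page pool and are returned to the system.
 */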
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

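/*
 * Deferred-free kthread: sleep until buffers appear on the free list,
 * then pop and destroy them one at a time.  The list may have been
 * emptied by a concurrent drain between the wakeup and taking the lock,
 * hence the re-check under free_lock.
 */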
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

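/**
 * ion_heap_init_deferred_free - set up deferred freeing for a heap
 * @heap:	the heap to initialize
 *
 * Initializes the free list and waitqueue, starts the deferred-free
 * kthread, and drops it to SCHED_IDLE so freeing work does not compete
 * with normal tasks.  Returns 0 on success or the kthread_run() error.
 */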
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

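/*
 * Shrinker "count" callback: report how many pages could be reclaimed,
 * i.e. everything on the deferred free list plus whatever the heap's own
 * shrink op reports (calling it with nr_to_scan == 0 only queries the
 * count).
 */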
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}

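/*
 * Shrinker "scan" callback: reclaim up to sc->nr_to_scan pages, draining
 * the deferred free list before invoking the heap's own shrink op.
 */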
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

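/**
 * ion_heap_init_shrinker - register a heap with the shrinker machinery
 * @heap:	the heap to register
 *
 * Hooks up the count/scan callbacks above so the kernel can reclaim
 * memory from this heap under memory pressure.
 */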
int ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;

        return register_shrinker(&heap->shrinker);
}