/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

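/*
 * Allocator overview: each binder proc mmap()s one contiguous region that
 * this file manages.  The kernel side of the region lives in a vmalloc-area
 * reservation made in binder_alloc_mmap_handler(); the userspace view is the
 * same pages at a fixed offset, alloc->user_buffer_offset.  Physical pages
 * backing the region are allocated lazily in binder_update_page_range().
 * Free chunks are kept in a size-sorted rbtree (alloc->free_buffers) used
 * for best-fit allocation, while in-use chunks are kept in an address-sorted
 * rbtree (alloc->allocated_buffers).  Pages that are no longer needed are
 * not freed immediately but parked on the global binder_alloc_lru, from
 * which the shrinker at the bottom of this file reclaims them under memory
 * pressure.
 */
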
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

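/*
 * A buffer's size is the distance from its data pointer to the next
 * buffer's data pointer, or to the end of the mapped area for the last
 * buffer in the list.
 */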
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

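/*
 * Insert a free buffer into the size-sorted free_buffers rbtree.  Duplicate
 * sizes are allowed; ties go to the right subtree.
 */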
static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: add free buffer, size %zd, at %pK\n",
                      alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

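/*
 * Insert an in-use buffer into the allocated_buffers rbtree, keyed by the
 * buffer's kernel data address.  Two buffers can never share a data
 * address, hence the BUG() on an exact match.
 */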
static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

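/*
 * Translate a userspace buffer pointer back to its kernel counterpart by
 * subtracting the fixed user/kernel offset, then look the buffer up in the
 * allocated_buffers rbtree.  The free_in_progress flag rejects a second
 * free of the same buffer from userspace.
 */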
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer twice
                         */
                        if (buffer->free_in_progress) {
                                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                                   "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                                                   alloc->pid, current->pid,
                                                   (u64)user_ptr);
                                return NULL;
                        }
                        buffer->free_in_progress = 1;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:      binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:      Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

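/*
 * Allocate (allocate == 1) or release (allocate == 0) the physical pages
 * backing the kernel address range [start, end).  Newly allocated pages are
 * mapped into both the kernel and the owning process; released pages are
 * not freed immediately but parked on binder_alloc_lru for the shrinker.
 */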
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: %s pages %pK-%pK\n", alloc->pid,
                     allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_read(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                                   alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                                alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /*
         * If we see that alloc->vma is not NULL, the buffer data structures
         * are completely set up.  See the paired smp_rmb() in
         * binder_alloc_get_vma().  We also want to guarantee that the new
         * alloc->vma_vm_mm is always visible if alloc->vma is set.
         */
        smp_wmb();
        alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Look at description in binder_alloc_set_vma */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}

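/*
 * Best-fit allocation: walk the size-sorted free tree for the smallest free
 * chunk that fits, back the needed pages with physical memory, and if the
 * chunk is larger than required split off the tail as a new free buffer.
 * Must be called with alloc->mutex held.
 */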
static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid size %zd-%zd\n",
                                alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid extra_buffers_size %zd\n",
                                alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                             "%d: binder_alloc_buf size %zd failed, no async space left\n",
                              alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                                   total_alloc_size, allocated_buffers,
                                   largest_alloc_size, total_free_size,
                                   free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                      alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
            (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:      The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

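/*
 * Page-boundary helpers used by binder_delete_free_buffer() below to decide
 * whether the page containing a removed free buffer's start is still shared
 * with a neighbouring buffer and must therefore stay mapped.
 */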
static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;
        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->data,
                                           next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}

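/*
 * Return a buffer to the free state: release whole pages that lie entirely
 * inside it, give back async space if needed, and coalesce with free
 * neighbours before reinserting it into the free_buffers tree.  Caller must
 * hold alloc->mutex.
 */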
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                      alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:      binder_alloc for this proc
 * @buffer:     kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:      alloc structure for this proc
 * @vma:        vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
                           failure_string, ret);
        return ret;
}

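/*
 * Tear down a proc's allocator state: free any still-allocated buffers,
 * drop every backing page (pulling it off the LRU if it was parked there),
 * then release the page array and the kernel mapping.  The vma must already
 * have been cleared by this point, hence the BUG_ON(alloc->vma).
 */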
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        buffers = 0;
        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                     "%s: %d: page %d at %pK %s\n",
                                     __func__, alloc->pid, i, page_addr,
                                     on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d buffers %d, pages %d\n",
                     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru containing @item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}

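/*
 * Shrinker glue: "count" reports how many binder pages are sitting on the
 * global LRU, and "scan" walks the LRU handing each page to
 * binder_alloc_free_page() above for reclaim.
 */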
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);
        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}