Merge tag 'trace-v6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux...
[sfrench/cifs-2.6.git] / drivers / android / binder.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires node->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 #include <linux/ktime.h>
70
71 #include <uapi/linux/android/binder.h>
72
73 #include <linux/cacheflush.h>
74
75 #include "binder_internal.h"
76 #include "binder_trace.h"
77
78 static HLIST_HEAD(binder_deferred_list);
79 static DEFINE_MUTEX(binder_deferred_lock);
80
81 static HLIST_HEAD(binder_devices);
82 static HLIST_HEAD(binder_procs);
83 static DEFINE_MUTEX(binder_procs_lock);
84
85 static HLIST_HEAD(binder_dead_nodes);
86 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87
88 static struct dentry *binder_debugfs_dir_entry_root;
89 static struct dentry *binder_debugfs_dir_entry_proc;
90 static atomic_t binder_last_id;
91
92 static int proc_show(struct seq_file *m, void *unused);
93 DEFINE_SHOW_ATTRIBUTE(proc);
94
95 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
96
97 enum {
98         BINDER_DEBUG_USER_ERROR             = 1U << 0,
99         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
100         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
101         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
102         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
103         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
104         BINDER_DEBUG_READ_WRITE             = 1U << 6,
105         BINDER_DEBUG_USER_REFS              = 1U << 7,
106         BINDER_DEBUG_THREADS                = 1U << 8,
107         BINDER_DEBUG_TRANSACTION            = 1U << 9,
108         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
109         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
110         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
111         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
112         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
113 };
114 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
115         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
116 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
117
118 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119 module_param_named(devices, binder_devices_param, charp, 0444);
120
121 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122 static int binder_stop_on_user_error;
123
124 static int binder_set_stop_on_user_error(const char *val,
125                                          const struct kernel_param *kp)
126 {
127         int ret;
128
129         ret = param_set_int(val, kp);
130         if (binder_stop_on_user_error < 2)
131                 wake_up(&binder_user_error_wait);
132         return ret;
133 }
134 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135         param_get_int, &binder_stop_on_user_error, 0644);
136
137 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
138 {
139         struct va_format vaf;
140         va_list args;
141
142         if (binder_debug_mask & mask) {
143                 va_start(args, format);
144                 vaf.va = &args;
145                 vaf.fmt = format;
146                 pr_info_ratelimited("%pV", &vaf);
147                 va_end(args);
148         }
149 }
150
151 #define binder_txn_error(x...) \
152         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
153
154 static __printf(1, 2) void binder_user_error(const char *format, ...)
155 {
156         struct va_format vaf;
157         va_list args;
158
159         if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
160                 va_start(args, format);
161                 vaf.va = &args;
162                 vaf.fmt = format;
163                 pr_info_ratelimited("%pV", &vaf);
164                 va_end(args);
165         }
166
167         if (binder_stop_on_user_error)
168                 binder_stop_on_user_error = 2;
169 }
170
171 #define binder_set_extended_error(ee, _id, _command, _param) \
172         do { \
173                 (ee)->id = _id; \
174                 (ee)->command = _command; \
175                 (ee)->param = _param; \
176         } while (0)
177
178 #define to_flat_binder_object(hdr) \
179         container_of(hdr, struct flat_binder_object, hdr)
180
181 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
182
183 #define to_binder_buffer_object(hdr) \
184         container_of(hdr, struct binder_buffer_object, hdr)
185
186 #define to_binder_fd_array_object(hdr) \
187         container_of(hdr, struct binder_fd_array_object, hdr)
188
189 static struct binder_stats binder_stats;
190
191 static inline void binder_stats_deleted(enum binder_stat_types type)
192 {
193         atomic_inc(&binder_stats.obj_deleted[type]);
194 }
195
196 static inline void binder_stats_created(enum binder_stat_types type)
197 {
198         atomic_inc(&binder_stats.obj_created[type]);
199 }
200
201 struct binder_transaction_log_entry {
202         int debug_id;
203         int debug_id_done;
204         int call_type;
205         int from_proc;
206         int from_thread;
207         int target_handle;
208         int to_proc;
209         int to_thread;
210         int to_node;
211         int data_size;
212         int offsets_size;
213         int return_error_line;
214         uint32_t return_error;
215         uint32_t return_error_param;
216         char context_name[BINDERFS_MAX_NAME + 1];
217 };
218
219 struct binder_transaction_log {
220         atomic_t cur;
221         bool full;
222         struct binder_transaction_log_entry entry[32];
223 };
224
225 static struct binder_transaction_log binder_transaction_log;
226 static struct binder_transaction_log binder_transaction_log_failed;
227
228 static struct binder_transaction_log_entry *binder_transaction_log_add(
229         struct binder_transaction_log *log)
230 {
231         struct binder_transaction_log_entry *e;
232         unsigned int cur = atomic_inc_return(&log->cur);
233
234         if (cur >= ARRAY_SIZE(log->entry))
235                 log->full = true;
236         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
237         WRITE_ONCE(e->debug_id_done, 0);
238         /*
239          * write-barrier to synchronize access to e->debug_id_done.
240          * We make sure the initialized 0 value is seen before
241          * memset() other fields are zeroed by memset.
242          */
243         smp_wmb();
244         memset(e, 0, sizeof(*e));
245         return e;
246 }
247
248 enum binder_deferred_state {
249         BINDER_DEFERRED_FLUSH        = 0x01,
250         BINDER_DEFERRED_RELEASE      = 0x02,
251 };
252
253 enum {
254         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
255         BINDER_LOOPER_STATE_ENTERED     = 0x02,
256         BINDER_LOOPER_STATE_EXITED      = 0x04,
257         BINDER_LOOPER_STATE_INVALID     = 0x08,
258         BINDER_LOOPER_STATE_WAITING     = 0x10,
259         BINDER_LOOPER_STATE_POLL        = 0x20,
260 };
261
262 /**
263  * binder_proc_lock() - Acquire outer lock for given binder_proc
264  * @proc:         struct binder_proc to acquire
265  *
266  * Acquires proc->outer_lock. Used to protect binder_ref
267  * structures associated with the given proc.
268  */
269 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
270 static void
271 _binder_proc_lock(struct binder_proc *proc, int line)
272         __acquires(&proc->outer_lock)
273 {
274         binder_debug(BINDER_DEBUG_SPINLOCKS,
275                      "%s: line=%d\n", __func__, line);
276         spin_lock(&proc->outer_lock);
277 }
278
279 /**
280  * binder_proc_unlock() - Release spinlock for given binder_proc
281  * @proc:                struct binder_proc to acquire
282  *
283  * Release lock acquired via binder_proc_lock()
284  */
285 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
286 static void
287 _binder_proc_unlock(struct binder_proc *proc, int line)
288         __releases(&proc->outer_lock)
289 {
290         binder_debug(BINDER_DEBUG_SPINLOCKS,
291                      "%s: line=%d\n", __func__, line);
292         spin_unlock(&proc->outer_lock);
293 }
294
295 /**
296  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
297  * @proc:         struct binder_proc to acquire
298  *
299  * Acquires proc->inner_lock. Used to protect todo lists
300  */
301 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
302 static void
303 _binder_inner_proc_lock(struct binder_proc *proc, int line)
304         __acquires(&proc->inner_lock)
305 {
306         binder_debug(BINDER_DEBUG_SPINLOCKS,
307                      "%s: line=%d\n", __func__, line);
308         spin_lock(&proc->inner_lock);
309 }
310
311 /**
312  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
313  * @proc:         struct binder_proc to acquire
314  *
315  * Release lock acquired via binder_inner_proc_lock()
316  */
317 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
318 static void
319 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
320         __releases(&proc->inner_lock)
321 {
322         binder_debug(BINDER_DEBUG_SPINLOCKS,
323                      "%s: line=%d\n", __func__, line);
324         spin_unlock(&proc->inner_lock);
325 }
326
327 /**
328  * binder_node_lock() - Acquire spinlock for given binder_node
329  * @node:         struct binder_node to acquire
330  *
331  * Acquires node->lock. Used to protect binder_node fields
332  */
333 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
334 static void
335 _binder_node_lock(struct binder_node *node, int line)
336         __acquires(&node->lock)
337 {
338         binder_debug(BINDER_DEBUG_SPINLOCKS,
339                      "%s: line=%d\n", __func__, line);
340         spin_lock(&node->lock);
341 }
342
343 /**
344  * binder_node_unlock() - Release spinlock for given binder_proc
345  * @node:         struct binder_node to acquire
346  *
347  * Release lock acquired via binder_node_lock()
348  */
349 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
350 static void
351 _binder_node_unlock(struct binder_node *node, int line)
352         __releases(&node->lock)
353 {
354         binder_debug(BINDER_DEBUG_SPINLOCKS,
355                      "%s: line=%d\n", __func__, line);
356         spin_unlock(&node->lock);
357 }
358
359 /**
360  * binder_node_inner_lock() - Acquire node and inner locks
361  * @node:         struct binder_node to acquire
362  *
363  * Acquires node->lock. If node->proc also acquires
364  * proc->inner_lock. Used to protect binder_node fields
365  */
366 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
367 static void
368 _binder_node_inner_lock(struct binder_node *node, int line)
369         __acquires(&node->lock) __acquires(&node->proc->inner_lock)
370 {
371         binder_debug(BINDER_DEBUG_SPINLOCKS,
372                      "%s: line=%d\n", __func__, line);
373         spin_lock(&node->lock);
374         if (node->proc)
375                 binder_inner_proc_lock(node->proc);
376         else
377                 /* annotation for sparse */
378                 __acquire(&node->proc->inner_lock);
379 }
380
381 /**
382  * binder_node_inner_unlock() - Release node and inner locks
383  * @node:         struct binder_node to acquire
384  *
385  * Release lock acquired via binder_node_lock()
386  */
387 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
388 static void
389 _binder_node_inner_unlock(struct binder_node *node, int line)
390         __releases(&node->lock) __releases(&node->proc->inner_lock)
391 {
392         struct binder_proc *proc = node->proc;
393
394         binder_debug(BINDER_DEBUG_SPINLOCKS,
395                      "%s: line=%d\n", __func__, line);
396         if (proc)
397                 binder_inner_proc_unlock(proc);
398         else
399                 /* annotation for sparse */
400                 __release(&node->proc->inner_lock);
401         spin_unlock(&node->lock);
402 }
403
404 static bool binder_worklist_empty_ilocked(struct list_head *list)
405 {
406         return list_empty(list);
407 }
408
409 /**
410  * binder_worklist_empty() - Check if no items on the work list
411  * @proc:       binder_proc associated with list
412  * @list:       list to check
413  *
414  * Return: true if there are no items on list, else false
415  */
416 static bool binder_worklist_empty(struct binder_proc *proc,
417                                   struct list_head *list)
418 {
419         bool ret;
420
421         binder_inner_proc_lock(proc);
422         ret = binder_worklist_empty_ilocked(list);
423         binder_inner_proc_unlock(proc);
424         return ret;
425 }
426
427 /**
428  * binder_enqueue_work_ilocked() - Add an item to the work list
429  * @work:         struct binder_work to add to list
430  * @target_list:  list to add work to
431  *
432  * Adds the work to the specified list. Asserts that work
433  * is not already on a list.
434  *
435  * Requires the proc->inner_lock to be held.
436  */
437 static void
438 binder_enqueue_work_ilocked(struct binder_work *work,
439                            struct list_head *target_list)
440 {
441         BUG_ON(target_list == NULL);
442         BUG_ON(work->entry.next && !list_empty(&work->entry));
443         list_add_tail(&work->entry, target_list);
444 }
445
446 /**
447  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
448  * @thread:       thread to queue work to
449  * @work:         struct binder_work to add to list
450  *
451  * Adds the work to the todo list of the thread. Doesn't set the process_todo
452  * flag, which means that (if it wasn't already set) the thread will go to
453  * sleep without handling this work when it calls read.
454  *
455  * Requires the proc->inner_lock to be held.
456  */
457 static void
458 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
459                                             struct binder_work *work)
460 {
461         WARN_ON(!list_empty(&thread->waiting_thread_node));
462         binder_enqueue_work_ilocked(work, &thread->todo);
463 }
464
465 /**
466  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
467  * @thread:       thread to queue work to
468  * @work:         struct binder_work to add to list
469  *
470  * Adds the work to the todo list of the thread, and enables processing
471  * of the todo queue.
472  *
473  * Requires the proc->inner_lock to be held.
474  */
475 static void
476 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
477                                    struct binder_work *work)
478 {
479         WARN_ON(!list_empty(&thread->waiting_thread_node));
480         binder_enqueue_work_ilocked(work, &thread->todo);
481
482         /* (e)poll-based threads require an explicit wakeup signal when
483          * queuing their own work; they rely on these events to consume
484          * messages without I/O block. Without it, threads risk waiting
485          * indefinitely without handling the work.
486          */
487         if (thread->looper & BINDER_LOOPER_STATE_POLL &&
488             thread->pid == current->pid && !thread->process_todo)
489                 wake_up_interruptible_sync(&thread->wait);
490
491         thread->process_todo = true;
492 }
493
494 /**
495  * binder_enqueue_thread_work() - Add an item to the thread work list
496  * @thread:       thread to queue work to
497  * @work:         struct binder_work to add to list
498  *
499  * Adds the work to the todo list of the thread, and enables processing
500  * of the todo queue.
501  */
502 static void
503 binder_enqueue_thread_work(struct binder_thread *thread,
504                            struct binder_work *work)
505 {
506         binder_inner_proc_lock(thread->proc);
507         binder_enqueue_thread_work_ilocked(thread, work);
508         binder_inner_proc_unlock(thread->proc);
509 }
510
511 static void
512 binder_dequeue_work_ilocked(struct binder_work *work)
513 {
514         list_del_init(&work->entry);
515 }
516
517 /**
518  * binder_dequeue_work() - Removes an item from the work list
519  * @proc:         binder_proc associated with list
520  * @work:         struct binder_work to remove from list
521  *
522  * Removes the specified work item from whatever list it is on.
523  * Can safely be called if work is not on any list.
524  */
525 static void
526 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
527 {
528         binder_inner_proc_lock(proc);
529         binder_dequeue_work_ilocked(work);
530         binder_inner_proc_unlock(proc);
531 }
532
533 static struct binder_work *binder_dequeue_work_head_ilocked(
534                                         struct list_head *list)
535 {
536         struct binder_work *w;
537
538         w = list_first_entry_or_null(list, struct binder_work, entry);
539         if (w)
540                 list_del_init(&w->entry);
541         return w;
542 }
543
544 static void
545 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
546 static void binder_free_thread(struct binder_thread *thread);
547 static void binder_free_proc(struct binder_proc *proc);
548 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
549
550 static bool binder_has_work_ilocked(struct binder_thread *thread,
551                                     bool do_proc_work)
552 {
553         return thread->process_todo ||
554                 thread->looper_need_return ||
555                 (do_proc_work &&
556                  !binder_worklist_empty_ilocked(&thread->proc->todo));
557 }
558
559 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
560 {
561         bool has_work;
562
563         binder_inner_proc_lock(thread->proc);
564         has_work = binder_has_work_ilocked(thread, do_proc_work);
565         binder_inner_proc_unlock(thread->proc);
566
567         return has_work;
568 }
569
570 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
571 {
572         return !thread->transaction_stack &&
573                 binder_worklist_empty_ilocked(&thread->todo) &&
574                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
575                                    BINDER_LOOPER_STATE_REGISTERED));
576 }
577
578 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
579                                                bool sync)
580 {
581         struct rb_node *n;
582         struct binder_thread *thread;
583
584         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
585                 thread = rb_entry(n, struct binder_thread, rb_node);
586                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
587                     binder_available_for_proc_work_ilocked(thread)) {
588                         if (sync)
589                                 wake_up_interruptible_sync(&thread->wait);
590                         else
591                                 wake_up_interruptible(&thread->wait);
592                 }
593         }
594 }
595
596 /**
597  * binder_select_thread_ilocked() - selects a thread for doing proc work.
598  * @proc:       process to select a thread from
599  *
600  * Note that calling this function moves the thread off the waiting_threads
601  * list, so it can only be woken up by the caller of this function, or a
602  * signal. Therefore, callers *should* always wake up the thread this function
603  * returns.
604  *
605  * Return:      If there's a thread currently waiting for process work,
606  *              returns that thread. Otherwise returns NULL.
607  */
608 static struct binder_thread *
609 binder_select_thread_ilocked(struct binder_proc *proc)
610 {
611         struct binder_thread *thread;
612
613         assert_spin_locked(&proc->inner_lock);
614         thread = list_first_entry_or_null(&proc->waiting_threads,
615                                           struct binder_thread,
616                                           waiting_thread_node);
617
618         if (thread)
619                 list_del_init(&thread->waiting_thread_node);
620
621         return thread;
622 }
623
624 /**
625  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
626  * @proc:       process to wake up a thread in
627  * @thread:     specific thread to wake-up (may be NULL)
628  * @sync:       whether to do a synchronous wake-up
629  *
630  * This function wakes up a thread in the @proc process.
631  * The caller may provide a specific thread to wake-up in
632  * the @thread parameter. If @thread is NULL, this function
633  * will wake up threads that have called poll().
634  *
635  * Note that for this function to work as expected, callers
636  * should first call binder_select_thread() to find a thread
637  * to handle the work (if they don't have a thread already),
638  * and pass the result into the @thread parameter.
639  */
640 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
641                                          struct binder_thread *thread,
642                                          bool sync)
643 {
644         assert_spin_locked(&proc->inner_lock);
645
646         if (thread) {
647                 if (sync)
648                         wake_up_interruptible_sync(&thread->wait);
649                 else
650                         wake_up_interruptible(&thread->wait);
651                 return;
652         }
653
654         /* Didn't find a thread waiting for proc work; this can happen
655          * in two scenarios:
656          * 1. All threads are busy handling transactions
657          *    In that case, one of those threads should call back into
658          *    the kernel driver soon and pick up this work.
659          * 2. Threads are using the (e)poll interface, in which case
660          *    they may be blocked on the waitqueue without having been
661          *    added to waiting_threads. For this case, we just iterate
662          *    over all threads not handling transaction work, and
663          *    wake them all up. We wake all because we don't know whether
664          *    a thread that called into (e)poll is handling non-binder
665          *    work currently.
666          */
667         binder_wakeup_poll_threads_ilocked(proc, sync);
668 }
669
670 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
671 {
672         struct binder_thread *thread = binder_select_thread_ilocked(proc);
673
674         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
675 }
676
677 static void binder_set_nice(long nice)
678 {
679         long min_nice;
680
681         if (can_nice(current, nice)) {
682                 set_user_nice(current, nice);
683                 return;
684         }
685         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
686         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
687                      "%d: nice value %ld not allowed use %ld instead\n",
688                       current->pid, nice, min_nice);
689         set_user_nice(current, min_nice);
690         if (min_nice <= MAX_NICE)
691                 return;
692         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
693 }
694
695 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
696                                                    binder_uintptr_t ptr)
697 {
698         struct rb_node *n = proc->nodes.rb_node;
699         struct binder_node *node;
700
701         assert_spin_locked(&proc->inner_lock);
702
703         while (n) {
704                 node = rb_entry(n, struct binder_node, rb_node);
705
706                 if (ptr < node->ptr)
707                         n = n->rb_left;
708                 else if (ptr > node->ptr)
709                         n = n->rb_right;
710                 else {
711                         /*
712                          * take an implicit weak reference
713                          * to ensure node stays alive until
714                          * call to binder_put_node()
715                          */
716                         binder_inc_node_tmpref_ilocked(node);
717                         return node;
718                 }
719         }
720         return NULL;
721 }
722
723 static struct binder_node *binder_get_node(struct binder_proc *proc,
724                                            binder_uintptr_t ptr)
725 {
726         struct binder_node *node;
727
728         binder_inner_proc_lock(proc);
729         node = binder_get_node_ilocked(proc, ptr);
730         binder_inner_proc_unlock(proc);
731         return node;
732 }
733
734 static struct binder_node *binder_init_node_ilocked(
735                                                 struct binder_proc *proc,
736                                                 struct binder_node *new_node,
737                                                 struct flat_binder_object *fp)
738 {
739         struct rb_node **p = &proc->nodes.rb_node;
740         struct rb_node *parent = NULL;
741         struct binder_node *node;
742         binder_uintptr_t ptr = fp ? fp->binder : 0;
743         binder_uintptr_t cookie = fp ? fp->cookie : 0;
744         __u32 flags = fp ? fp->flags : 0;
745
746         assert_spin_locked(&proc->inner_lock);
747
748         while (*p) {
749
750                 parent = *p;
751                 node = rb_entry(parent, struct binder_node, rb_node);
752
753                 if (ptr < node->ptr)
754                         p = &(*p)->rb_left;
755                 else if (ptr > node->ptr)
756                         p = &(*p)->rb_right;
757                 else {
758                         /*
759                          * A matching node is already in
760                          * the rb tree. Abandon the init
761                          * and return it.
762                          */
763                         binder_inc_node_tmpref_ilocked(node);
764                         return node;
765                 }
766         }
767         node = new_node;
768         binder_stats_created(BINDER_STAT_NODE);
769         node->tmp_refs++;
770         rb_link_node(&node->rb_node, parent, p);
771         rb_insert_color(&node->rb_node, &proc->nodes);
772         node->debug_id = atomic_inc_return(&binder_last_id);
773         node->proc = proc;
774         node->ptr = ptr;
775         node->cookie = cookie;
776         node->work.type = BINDER_WORK_NODE;
777         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
778         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
779         node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
780         spin_lock_init(&node->lock);
781         INIT_LIST_HEAD(&node->work.entry);
782         INIT_LIST_HEAD(&node->async_todo);
783         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
784                      "%d:%d node %d u%016llx c%016llx created\n",
785                      proc->pid, current->pid, node->debug_id,
786                      (u64)node->ptr, (u64)node->cookie);
787
788         return node;
789 }
790
791 static struct binder_node *binder_new_node(struct binder_proc *proc,
792                                            struct flat_binder_object *fp)
793 {
794         struct binder_node *node;
795         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
796
797         if (!new_node)
798                 return NULL;
799         binder_inner_proc_lock(proc);
800         node = binder_init_node_ilocked(proc, new_node, fp);
801         binder_inner_proc_unlock(proc);
802         if (node != new_node)
803                 /*
804                  * The node was already added by another thread
805                  */
806                 kfree(new_node);
807
808         return node;
809 }
810
811 static void binder_free_node(struct binder_node *node)
812 {
813         kfree(node);
814         binder_stats_deleted(BINDER_STAT_NODE);
815 }
816
817 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
818                                     int internal,
819                                     struct list_head *target_list)
820 {
821         struct binder_proc *proc = node->proc;
822
823         assert_spin_locked(&node->lock);
824         if (proc)
825                 assert_spin_locked(&proc->inner_lock);
826         if (strong) {
827                 if (internal) {
828                         if (target_list == NULL &&
829                             node->internal_strong_refs == 0 &&
830                             !(node->proc &&
831                               node == node->proc->context->binder_context_mgr_node &&
832                               node->has_strong_ref)) {
833                                 pr_err("invalid inc strong node for %d\n",
834                                         node->debug_id);
835                                 return -EINVAL;
836                         }
837                         node->internal_strong_refs++;
838                 } else
839                         node->local_strong_refs++;
840                 if (!node->has_strong_ref && target_list) {
841                         struct binder_thread *thread = container_of(target_list,
842                                                     struct binder_thread, todo);
843                         binder_dequeue_work_ilocked(&node->work);
844                         BUG_ON(&thread->todo != target_list);
845                         binder_enqueue_deferred_thread_work_ilocked(thread,
846                                                                    &node->work);
847                 }
848         } else {
849                 if (!internal)
850                         node->local_weak_refs++;
851                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
852                         if (target_list == NULL) {
853                                 pr_err("invalid inc weak node for %d\n",
854                                         node->debug_id);
855                                 return -EINVAL;
856                         }
857                         /*
858                          * See comment above
859                          */
860                         binder_enqueue_work_ilocked(&node->work, target_list);
861                 }
862         }
863         return 0;
864 }
865
866 static int binder_inc_node(struct binder_node *node, int strong, int internal,
867                            struct list_head *target_list)
868 {
869         int ret;
870
871         binder_node_inner_lock(node);
872         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
873         binder_node_inner_unlock(node);
874
875         return ret;
876 }
877
878 static bool binder_dec_node_nilocked(struct binder_node *node,
879                                      int strong, int internal)
880 {
881         struct binder_proc *proc = node->proc;
882
883         assert_spin_locked(&node->lock);
884         if (proc)
885                 assert_spin_locked(&proc->inner_lock);
886         if (strong) {
887                 if (internal)
888                         node->internal_strong_refs--;
889                 else
890                         node->local_strong_refs--;
891                 if (node->local_strong_refs || node->internal_strong_refs)
892                         return false;
893         } else {
894                 if (!internal)
895                         node->local_weak_refs--;
896                 if (node->local_weak_refs || node->tmp_refs ||
897                                 !hlist_empty(&node->refs))
898                         return false;
899         }
900
901         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
902                 if (list_empty(&node->work.entry)) {
903                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
904                         binder_wakeup_proc_ilocked(proc);
905                 }
906         } else {
907                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
908                     !node->local_weak_refs && !node->tmp_refs) {
909                         if (proc) {
910                                 binder_dequeue_work_ilocked(&node->work);
911                                 rb_erase(&node->rb_node, &proc->nodes);
912                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
913                                              "refless node %d deleted\n",
914                                              node->debug_id);
915                         } else {
916                                 BUG_ON(!list_empty(&node->work.entry));
917                                 spin_lock(&binder_dead_nodes_lock);
918                                 /*
919                                  * tmp_refs could have changed so
920                                  * check it again
921                                  */
922                                 if (node->tmp_refs) {
923                                         spin_unlock(&binder_dead_nodes_lock);
924                                         return false;
925                                 }
926                                 hlist_del(&node->dead_node);
927                                 spin_unlock(&binder_dead_nodes_lock);
928                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
929                                              "dead node %d deleted\n",
930                                              node->debug_id);
931                         }
932                         return true;
933                 }
934         }
935         return false;
936 }
937
938 static void binder_dec_node(struct binder_node *node, int strong, int internal)
939 {
940         bool free_node;
941
942         binder_node_inner_lock(node);
943         free_node = binder_dec_node_nilocked(node, strong, internal);
944         binder_node_inner_unlock(node);
945         if (free_node)
946                 binder_free_node(node);
947 }
948
949 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
950 {
951         /*
952          * No call to binder_inc_node() is needed since we
953          * don't need to inform userspace of any changes to
954          * tmp_refs
955          */
956         node->tmp_refs++;
957 }
958
959 /**
960  * binder_inc_node_tmpref() - take a temporary reference on node
961  * @node:       node to reference
962  *
963  * Take reference on node to prevent the node from being freed
964  * while referenced only by a local variable. The inner lock is
965  * needed to serialize with the node work on the queue (which
966  * isn't needed after the node is dead). If the node is dead
967  * (node->proc is NULL), use binder_dead_nodes_lock to protect
968  * node->tmp_refs against dead-node-only cases where the node
969  * lock cannot be acquired (eg traversing the dead node list to
970  * print nodes)
971  */
972 static void binder_inc_node_tmpref(struct binder_node *node)
973 {
974         binder_node_lock(node);
975         if (node->proc)
976                 binder_inner_proc_lock(node->proc);
977         else
978                 spin_lock(&binder_dead_nodes_lock);
979         binder_inc_node_tmpref_ilocked(node);
980         if (node->proc)
981                 binder_inner_proc_unlock(node->proc);
982         else
983                 spin_unlock(&binder_dead_nodes_lock);
984         binder_node_unlock(node);
985 }
986
987 /**
988  * binder_dec_node_tmpref() - remove a temporary reference on node
989  * @node:       node to reference
990  *
991  * Release temporary reference on node taken via binder_inc_node_tmpref()
992  */
993 static void binder_dec_node_tmpref(struct binder_node *node)
994 {
995         bool free_node;
996
997         binder_node_inner_lock(node);
998         if (!node->proc)
999                 spin_lock(&binder_dead_nodes_lock);
1000         else
1001                 __acquire(&binder_dead_nodes_lock);
1002         node->tmp_refs--;
1003         BUG_ON(node->tmp_refs < 0);
1004         if (!node->proc)
1005                 spin_unlock(&binder_dead_nodes_lock);
1006         else
1007                 __release(&binder_dead_nodes_lock);
1008         /*
1009          * Call binder_dec_node() to check if all refcounts are 0
1010          * and cleanup is needed. Calling with strong=0 and internal=1
1011          * causes no actual reference to be released in binder_dec_node().
1012          * If that changes, a change is needed here too.
1013          */
1014         free_node = binder_dec_node_nilocked(node, 0, 1);
1015         binder_node_inner_unlock(node);
1016         if (free_node)
1017                 binder_free_node(node);
1018 }
1019
1020 static void binder_put_node(struct binder_node *node)
1021 {
1022         binder_dec_node_tmpref(node);
1023 }
1024
1025 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1026                                                  u32 desc, bool need_strong_ref)
1027 {
1028         struct rb_node *n = proc->refs_by_desc.rb_node;
1029         struct binder_ref *ref;
1030
1031         while (n) {
1032                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1033
1034                 if (desc < ref->data.desc) {
1035                         n = n->rb_left;
1036                 } else if (desc > ref->data.desc) {
1037                         n = n->rb_right;
1038                 } else if (need_strong_ref && !ref->data.strong) {
1039                         binder_user_error("tried to use weak ref as strong ref\n");
1040                         return NULL;
1041                 } else {
1042                         return ref;
1043                 }
1044         }
1045         return NULL;
1046 }
1047
1048 /**
1049  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1050  * @proc:       binder_proc that owns the ref
1051  * @node:       binder_node of target
1052  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1053  *
1054  * Look up the ref for the given node and return it if it exists
1055  *
1056  * If it doesn't exist and the caller provides a newly allocated
1057  * ref, initialize the fields of the newly allocated ref and insert
1058  * into the given proc rb_trees and node refs list.
1059  *
1060  * Return:      the ref for node. It is possible that another thread
1061  *              allocated/initialized the ref first in which case the
1062  *              returned ref would be different than the passed-in
1063  *              new_ref. new_ref must be kfree'd by the caller in
1064  *              this case.
1065  */
1066 static struct binder_ref *binder_get_ref_for_node_olocked(
1067                                         struct binder_proc *proc,
1068                                         struct binder_node *node,
1069                                         struct binder_ref *new_ref)
1070 {
1071         struct binder_context *context = proc->context;
1072         struct rb_node **p = &proc->refs_by_node.rb_node;
1073         struct rb_node *parent = NULL;
1074         struct binder_ref *ref;
1075         struct rb_node *n;
1076
1077         while (*p) {
1078                 parent = *p;
1079                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1080
1081                 if (node < ref->node)
1082                         p = &(*p)->rb_left;
1083                 else if (node > ref->node)
1084                         p = &(*p)->rb_right;
1085                 else
1086                         return ref;
1087         }
1088         if (!new_ref)
1089                 return NULL;
1090
1091         binder_stats_created(BINDER_STAT_REF);
1092         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1093         new_ref->proc = proc;
1094         new_ref->node = node;
1095         rb_link_node(&new_ref->rb_node_node, parent, p);
1096         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1097
1098         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1099         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1100                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1101                 if (ref->data.desc > new_ref->data.desc)
1102                         break;
1103                 new_ref->data.desc = ref->data.desc + 1;
1104         }
1105
1106         p = &proc->refs_by_desc.rb_node;
1107         while (*p) {
1108                 parent = *p;
1109                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1110
1111                 if (new_ref->data.desc < ref->data.desc)
1112                         p = &(*p)->rb_left;
1113                 else if (new_ref->data.desc > ref->data.desc)
1114                         p = &(*p)->rb_right;
1115                 else
1116                         BUG();
1117         }
1118         rb_link_node(&new_ref->rb_node_desc, parent, p);
1119         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1120
1121         binder_node_lock(node);
1122         hlist_add_head(&new_ref->node_entry, &node->refs);
1123
1124         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1125                      "%d new ref %d desc %d for node %d\n",
1126                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1127                       node->debug_id);
1128         binder_node_unlock(node);
1129         return new_ref;
1130 }
1131
1132 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1133 {
1134         bool delete_node = false;
1135
1136         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1137                      "%d delete ref %d desc %d for node %d\n",
1138                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1139                       ref->node->debug_id);
1140
1141         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1142         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1143
1144         binder_node_inner_lock(ref->node);
1145         if (ref->data.strong)
1146                 binder_dec_node_nilocked(ref->node, 1, 1);
1147
1148         hlist_del(&ref->node_entry);
1149         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1150         binder_node_inner_unlock(ref->node);
1151         /*
1152          * Clear ref->node unless we want the caller to free the node
1153          */
1154         if (!delete_node) {
1155                 /*
1156                  * The caller uses ref->node to determine
1157                  * whether the node needs to be freed. Clear
1158                  * it since the node is still alive.
1159                  */
1160                 ref->node = NULL;
1161         }
1162
1163         if (ref->death) {
1164                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1165                              "%d delete ref %d desc %d has death notification\n",
1166                               ref->proc->pid, ref->data.debug_id,
1167                               ref->data.desc);
1168                 binder_dequeue_work(ref->proc, &ref->death->work);
1169                 binder_stats_deleted(BINDER_STAT_DEATH);
1170         }
1171         binder_stats_deleted(BINDER_STAT_REF);
1172 }
1173
1174 /**
1175  * binder_inc_ref_olocked() - increment the ref for given handle
1176  * @ref:         ref to be incremented
1177  * @strong:      if true, strong increment, else weak
1178  * @target_list: list to queue node work on
1179  *
1180  * Increment the ref. @ref->proc->outer_lock must be held on entry
1181  *
1182  * Return: 0, if successful, else errno
1183  */
1184 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1185                                   struct list_head *target_list)
1186 {
1187         int ret;
1188
1189         if (strong) {
1190                 if (ref->data.strong == 0) {
1191                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1192                         if (ret)
1193                                 return ret;
1194                 }
1195                 ref->data.strong++;
1196         } else {
1197                 if (ref->data.weak == 0) {
1198                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1199                         if (ret)
1200                                 return ret;
1201                 }
1202                 ref->data.weak++;
1203         }
1204         return 0;
1205 }
1206
1207 /**
1208  * binder_dec_ref_olocked() - dec the ref for given handle
1209  * @ref:        ref to be decremented
1210  * @strong:     if true, strong decrement, else weak
1211  *
1212  * Decrement the ref.
1213  *
1214  * Return: %true if ref is cleaned up and ready to be freed.
1215  */
1216 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1217 {
1218         if (strong) {
1219                 if (ref->data.strong == 0) {
1220                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1221                                           ref->proc->pid, ref->data.debug_id,
1222                                           ref->data.desc, ref->data.strong,
1223                                           ref->data.weak);
1224                         return false;
1225                 }
1226                 ref->data.strong--;
1227                 if (ref->data.strong == 0)
1228                         binder_dec_node(ref->node, strong, 1);
1229         } else {
1230                 if (ref->data.weak == 0) {
1231                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1232                                           ref->proc->pid, ref->data.debug_id,
1233                                           ref->data.desc, ref->data.strong,
1234                                           ref->data.weak);
1235                         return false;
1236                 }
1237                 ref->data.weak--;
1238         }
1239         if (ref->data.strong == 0 && ref->data.weak == 0) {
1240                 binder_cleanup_ref_olocked(ref);
1241                 return true;
1242         }
1243         return false;
1244 }
1245
1246 /**
1247  * binder_get_node_from_ref() - get the node from the given proc/desc
1248  * @proc:       proc containing the ref
1249  * @desc:       the handle associated with the ref
1250  * @need_strong_ref: if true, only return node if ref is strong
1251  * @rdata:      the id/refcount data for the ref
1252  *
1253  * Given a proc and ref handle, return the associated binder_node
1254  *
1255  * Return: a binder_node or NULL if not found or not strong when strong required
1256  */
1257 static struct binder_node *binder_get_node_from_ref(
1258                 struct binder_proc *proc,
1259                 u32 desc, bool need_strong_ref,
1260                 struct binder_ref_data *rdata)
1261 {
1262         struct binder_node *node;
1263         struct binder_ref *ref;
1264
1265         binder_proc_lock(proc);
1266         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1267         if (!ref)
1268                 goto err_no_ref;
1269         node = ref->node;
1270         /*
1271          * Take an implicit reference on the node to ensure
1272          * it stays alive until the call to binder_put_node()
1273          */
1274         binder_inc_node_tmpref(node);
1275         if (rdata)
1276                 *rdata = ref->data;
1277         binder_proc_unlock(proc);
1278
1279         return node;
1280
1281 err_no_ref:
1282         binder_proc_unlock(proc);
1283         return NULL;
1284 }
1285
1286 /**
1287  * binder_free_ref() - free the binder_ref
1288  * @ref:        ref to free
1289  *
1290  * Free the binder_ref. Free the binder_node indicated by ref->node
1291  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1292  */
1293 static void binder_free_ref(struct binder_ref *ref)
1294 {
1295         if (ref->node)
1296                 binder_free_node(ref->node);
1297         kfree(ref->death);
1298         kfree(ref);
1299 }
1300
1301 /**
1302  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1303  * @proc:       proc containing the ref
1304  * @desc:       the handle associated with the ref
1305  * @increment:  true=inc reference, false=dec reference
1306  * @strong:     true=strong reference, false=weak reference
1307  * @rdata:      the id/refcount data for the ref
1308  *
1309  * Given a proc and ref handle, increment or decrement the ref
1310  * according to "increment" arg.
1311  *
1312  * Return: 0 if successful, else errno
1313  */
1314 static int binder_update_ref_for_handle(struct binder_proc *proc,
1315                 uint32_t desc, bool increment, bool strong,
1316                 struct binder_ref_data *rdata)
1317 {
1318         int ret = 0;
1319         struct binder_ref *ref;
1320         bool delete_ref = false;
1321
1322         binder_proc_lock(proc);
1323         ref = binder_get_ref_olocked(proc, desc, strong);
1324         if (!ref) {
1325                 ret = -EINVAL;
1326                 goto err_no_ref;
1327         }
1328         if (increment)
1329                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1330         else
1331                 delete_ref = binder_dec_ref_olocked(ref, strong);
1332
1333         if (rdata)
1334                 *rdata = ref->data;
1335         binder_proc_unlock(proc);
1336
1337         if (delete_ref)
1338                 binder_free_ref(ref);
1339         return ret;
1340
1341 err_no_ref:
1342         binder_proc_unlock(proc);
1343         return ret;
1344 }
1345
1346 /**
1347  * binder_dec_ref_for_handle() - dec the ref for given handle
1348  * @proc:       proc containing the ref
1349  * @desc:       the handle associated with the ref
1350  * @strong:     true=strong reference, false=weak reference
1351  * @rdata:      the id/refcount data for the ref
1352  *
1353  * Just calls binder_update_ref_for_handle() to decrement the ref.
1354  *
1355  * Return: 0 if successful, else errno
1356  */
1357 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1358                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1359 {
1360         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1361 }
1362
1363
1364 /**
1365  * binder_inc_ref_for_node() - increment the ref for given proc/node
1366  * @proc:        proc containing the ref
1367  * @node:        target node
1368  * @strong:      true=strong reference, false=weak reference
1369  * @target_list: worklist to use if node is incremented
1370  * @rdata:       the id/refcount data for the ref
1371  *
1372  * Given a proc and node, increment the ref. Create the ref if it
1373  * doesn't already exist
1374  *
1375  * Return: 0 if successful, else errno
1376  */
1377 static int binder_inc_ref_for_node(struct binder_proc *proc,
1378                         struct binder_node *node,
1379                         bool strong,
1380                         struct list_head *target_list,
1381                         struct binder_ref_data *rdata)
1382 {
1383         struct binder_ref *ref;
1384         struct binder_ref *new_ref = NULL;
1385         int ret = 0;
1386
1387         binder_proc_lock(proc);
1388         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1389         if (!ref) {
1390                 binder_proc_unlock(proc);
1391                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1392                 if (!new_ref)
1393                         return -ENOMEM;
1394                 binder_proc_lock(proc);
1395                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1396         }
1397         ret = binder_inc_ref_olocked(ref, strong, target_list);
1398         *rdata = ref->data;
1399         if (ret && ref == new_ref) {
1400                 /*
1401                  * Cleanup the failed reference here as the target
1402                  * could now be dead and have already released its
1403                  * references by now. Calling on the new reference
1404                  * with strong=0 and a tmp_refs will not decrement
1405                  * the node. The new_ref gets kfree'd below.
1406                  */
1407                 binder_cleanup_ref_olocked(new_ref);
1408                 ref = NULL;
1409         }
1410
1411         binder_proc_unlock(proc);
1412         if (new_ref && ref != new_ref)
1413                 /*
1414                  * Another thread created the ref first so
1415                  * free the one we allocated
1416                  */
1417                 kfree(new_ref);
1418         return ret;
1419 }
1420
1421 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1422                                            struct binder_transaction *t)
1423 {
1424         BUG_ON(!target_thread);
1425         assert_spin_locked(&target_thread->proc->inner_lock);
1426         BUG_ON(target_thread->transaction_stack != t);
1427         BUG_ON(target_thread->transaction_stack->from != target_thread);
1428         target_thread->transaction_stack =
1429                 target_thread->transaction_stack->from_parent;
1430         t->from = NULL;
1431 }
1432
1433 /**
1434  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1435  * @thread:     thread to decrement
1436  *
1437  * A thread needs to be kept alive while being used to create or
1438  * handle a transaction. binder_get_txn_from() is used to safely
1439  * extract t->from from a binder_transaction and keep the thread
1440  * indicated by t->from from being freed. When done with that
1441  * binder_thread, this function is called to decrement the
1442  * tmp_ref and free if appropriate (thread has been released
1443  * and no transaction being processed by the driver)
1444  */
1445 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1446 {
1447         /*
1448          * atomic is used to protect the counter value while
1449          * it cannot reach zero or thread->is_dead is false
1450          */
1451         binder_inner_proc_lock(thread->proc);
1452         atomic_dec(&thread->tmp_ref);
1453         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1454                 binder_inner_proc_unlock(thread->proc);
1455                 binder_free_thread(thread);
1456                 return;
1457         }
1458         binder_inner_proc_unlock(thread->proc);
1459 }
1460
1461 /**
1462  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1463  * @proc:       proc to decrement
1464  *
1465  * A binder_proc needs to be kept alive while being used to create or
1466  * handle a transaction. proc->tmp_ref is incremented when
1467  * creating a new transaction or the binder_proc is currently in-use
1468  * by threads that are being released. When done with the binder_proc,
1469  * this function is called to decrement the counter and free the
1470  * proc if appropriate (proc has been released, all threads have
1471  * been released and not currenly in-use to process a transaction).
1472  */
1473 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1474 {
1475         binder_inner_proc_lock(proc);
1476         proc->tmp_ref--;
1477         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1478                         !proc->tmp_ref) {
1479                 binder_inner_proc_unlock(proc);
1480                 binder_free_proc(proc);
1481                 return;
1482         }
1483         binder_inner_proc_unlock(proc);
1484 }
1485
1486 /**
1487  * binder_get_txn_from() - safely extract the "from" thread in transaction
1488  * @t:  binder transaction for t->from
1489  *
1490  * Atomically return the "from" thread and increment the tmp_ref
1491  * count for the thread to ensure it stays alive until
1492  * binder_thread_dec_tmpref() is called.
1493  *
1494  * Return: the value of t->from
1495  */
1496 static struct binder_thread *binder_get_txn_from(
1497                 struct binder_transaction *t)
1498 {
1499         struct binder_thread *from;
1500
1501         spin_lock(&t->lock);
1502         from = t->from;
1503         if (from)
1504                 atomic_inc(&from->tmp_ref);
1505         spin_unlock(&t->lock);
1506         return from;
1507 }
1508
1509 /**
1510  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1511  * @t:  binder transaction for t->from
1512  *
1513  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1514  * to guarantee that the thread cannot be released while operating on it.
1515  * The caller must call binder_inner_proc_unlock() to release the inner lock
1516  * as well as call binder_dec_thread_txn() to release the reference.
1517  *
1518  * Return: the value of t->from
1519  */
1520 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1521                 struct binder_transaction *t)
1522         __acquires(&t->from->proc->inner_lock)
1523 {
1524         struct binder_thread *from;
1525
1526         from = binder_get_txn_from(t);
1527         if (!from) {
1528                 __acquire(&from->proc->inner_lock);
1529                 return NULL;
1530         }
1531         binder_inner_proc_lock(from->proc);
1532         if (t->from) {
1533                 BUG_ON(from != t->from);
1534                 return from;
1535         }
1536         binder_inner_proc_unlock(from->proc);
1537         __acquire(&from->proc->inner_lock);
1538         binder_thread_dec_tmpref(from);
1539         return NULL;
1540 }
1541
1542 /**
1543  * binder_free_txn_fixups() - free unprocessed fd fixups
1544  * @t:  binder transaction for t->from
1545  *
1546  * If the transaction is being torn down prior to being
1547  * processed by the target process, free all of the
1548  * fd fixups and fput the file structs. It is safe to
1549  * call this function after the fixups have been
1550  * processed -- in that case, the list will be empty.
1551  */
1552 static void binder_free_txn_fixups(struct binder_transaction *t)
1553 {
1554         struct binder_txn_fd_fixup *fixup, *tmp;
1555
1556         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1557                 fput(fixup->file);
1558                 if (fixup->target_fd >= 0)
1559                         put_unused_fd(fixup->target_fd);
1560                 list_del(&fixup->fixup_entry);
1561                 kfree(fixup);
1562         }
1563 }
1564
1565 static void binder_txn_latency_free(struct binder_transaction *t)
1566 {
1567         int from_proc, from_thread, to_proc, to_thread;
1568
1569         spin_lock(&t->lock);
1570         from_proc = t->from ? t->from->proc->pid : 0;
1571         from_thread = t->from ? t->from->pid : 0;
1572         to_proc = t->to_proc ? t->to_proc->pid : 0;
1573         to_thread = t->to_thread ? t->to_thread->pid : 0;
1574         spin_unlock(&t->lock);
1575
1576         trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1577 }
1578
1579 static void binder_free_transaction(struct binder_transaction *t)
1580 {
1581         struct binder_proc *target_proc = t->to_proc;
1582
1583         if (target_proc) {
1584                 binder_inner_proc_lock(target_proc);
1585                 target_proc->outstanding_txns--;
1586                 if (target_proc->outstanding_txns < 0)
1587                         pr_warn("%s: Unexpected outstanding_txns %d\n",
1588                                 __func__, target_proc->outstanding_txns);
1589                 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1590                         wake_up_interruptible_all(&target_proc->freeze_wait);
1591                 if (t->buffer)
1592                         t->buffer->transaction = NULL;
1593                 binder_inner_proc_unlock(target_proc);
1594         }
1595         if (trace_binder_txn_latency_free_enabled())
1596                 binder_txn_latency_free(t);
1597         /*
1598          * If the transaction has no target_proc, then
1599          * t->buffer->transaction has already been cleared.
1600          */
1601         binder_free_txn_fixups(t);
1602         kfree(t);
1603         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1604 }
1605
1606 static void binder_send_failed_reply(struct binder_transaction *t,
1607                                      uint32_t error_code)
1608 {
1609         struct binder_thread *target_thread;
1610         struct binder_transaction *next;
1611
1612         BUG_ON(t->flags & TF_ONE_WAY);
1613         while (1) {
1614                 target_thread = binder_get_txn_from_and_acq_inner(t);
1615                 if (target_thread) {
1616                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1617                                      "send failed reply for transaction %d to %d:%d\n",
1618                                       t->debug_id,
1619                                       target_thread->proc->pid,
1620                                       target_thread->pid);
1621
1622                         binder_pop_transaction_ilocked(target_thread, t);
1623                         if (target_thread->reply_error.cmd == BR_OK) {
1624                                 target_thread->reply_error.cmd = error_code;
1625                                 binder_enqueue_thread_work_ilocked(
1626                                         target_thread,
1627                                         &target_thread->reply_error.work);
1628                                 wake_up_interruptible(&target_thread->wait);
1629                         } else {
1630                                 /*
1631                                  * Cannot get here for normal operation, but
1632                                  * we can if multiple synchronous transactions
1633                                  * are sent without blocking for responses.
1634                                  * Just ignore the 2nd error in this case.
1635                                  */
1636                                 pr_warn("Unexpected reply error: %u\n",
1637                                         target_thread->reply_error.cmd);
1638                         }
1639                         binder_inner_proc_unlock(target_thread->proc);
1640                         binder_thread_dec_tmpref(target_thread);
1641                         binder_free_transaction(t);
1642                         return;
1643                 }
1644                 __release(&target_thread->proc->inner_lock);
1645                 next = t->from_parent;
1646
1647                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1648                              "send failed reply for transaction %d, target dead\n",
1649                              t->debug_id);
1650
1651                 binder_free_transaction(t);
1652                 if (next == NULL) {
1653                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1654                                      "reply failed, no target thread at root\n");
1655                         return;
1656                 }
1657                 t = next;
1658                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1659                              "reply failed, no target thread -- retry %d\n",
1660                               t->debug_id);
1661         }
1662 }
1663
1664 /**
1665  * binder_cleanup_transaction() - cleans up undelivered transaction
1666  * @t:          transaction that needs to be cleaned up
1667  * @reason:     reason the transaction wasn't delivered
1668  * @error_code: error to return to caller (if synchronous call)
1669  */
1670 static void binder_cleanup_transaction(struct binder_transaction *t,
1671                                        const char *reason,
1672                                        uint32_t error_code)
1673 {
1674         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1675                 binder_send_failed_reply(t, error_code);
1676         } else {
1677                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1678                         "undelivered transaction %d, %s\n",
1679                         t->debug_id, reason);
1680                 binder_free_transaction(t);
1681         }
1682 }
1683
1684 /**
1685  * binder_get_object() - gets object and checks for valid metadata
1686  * @proc:       binder_proc owning the buffer
1687  * @u:          sender's user pointer to base of buffer
1688  * @buffer:     binder_buffer that we're parsing.
1689  * @offset:     offset in the @buffer at which to validate an object.
1690  * @object:     struct binder_object to read into
1691  *
1692  * Copy the binder object at the given offset into @object. If @u is
1693  * provided then the copy is from the sender's buffer. If not, then
1694  * it is copied from the target's @buffer.
1695  *
1696  * Return:      If there's a valid metadata object at @offset, the
1697  *              size of that object. Otherwise, it returns zero. The object
1698  *              is read into the struct binder_object pointed to by @object.
1699  */
1700 static size_t binder_get_object(struct binder_proc *proc,
1701                                 const void __user *u,
1702                                 struct binder_buffer *buffer,
1703                                 unsigned long offset,
1704                                 struct binder_object *object)
1705 {
1706         size_t read_size;
1707         struct binder_object_header *hdr;
1708         size_t object_size = 0;
1709
1710         read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1711         if (offset > buffer->data_size || read_size < sizeof(*hdr))
1712                 return 0;
1713         if (u) {
1714                 if (copy_from_user(object, u + offset, read_size))
1715                         return 0;
1716         } else {
1717                 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1718                                                   offset, read_size))
1719                         return 0;
1720         }
1721
1722         /* Ok, now see if we read a complete object. */
1723         hdr = &object->hdr;
1724         switch (hdr->type) {
1725         case BINDER_TYPE_BINDER:
1726         case BINDER_TYPE_WEAK_BINDER:
1727         case BINDER_TYPE_HANDLE:
1728         case BINDER_TYPE_WEAK_HANDLE:
1729                 object_size = sizeof(struct flat_binder_object);
1730                 break;
1731         case BINDER_TYPE_FD:
1732                 object_size = sizeof(struct binder_fd_object);
1733                 break;
1734         case BINDER_TYPE_PTR:
1735                 object_size = sizeof(struct binder_buffer_object);
1736                 break;
1737         case BINDER_TYPE_FDA:
1738                 object_size = sizeof(struct binder_fd_array_object);
1739                 break;
1740         default:
1741                 return 0;
1742         }
1743         if (offset <= buffer->data_size - object_size &&
1744             buffer->data_size >= object_size)
1745                 return object_size;
1746         else
1747                 return 0;
1748 }
1749
1750 /**
1751  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1752  * @proc:       binder_proc owning the buffer
1753  * @b:          binder_buffer containing the object
1754  * @object:     struct binder_object to read into
1755  * @index:      index in offset array at which the binder_buffer_object is
1756  *              located
1757  * @start_offset: points to the start of the offset array
1758  * @object_offsetp: offset of @object read from @b
1759  * @num_valid:  the number of valid offsets in the offset array
1760  *
1761  * Return:      If @index is within the valid range of the offset array
1762  *              described by @start and @num_valid, and if there's a valid
1763  *              binder_buffer_object at the offset found in index @index
1764  *              of the offset array, that object is returned. Otherwise,
1765  *              %NULL is returned.
1766  *              Note that the offset found in index @index itself is not
1767  *              verified; this function assumes that @num_valid elements
1768  *              from @start were previously verified to have valid offsets.
1769  *              If @object_offsetp is non-NULL, then the offset within
1770  *              @b is written to it.
1771  */
1772 static struct binder_buffer_object *binder_validate_ptr(
1773                                                 struct binder_proc *proc,
1774                                                 struct binder_buffer *b,
1775                                                 struct binder_object *object,
1776                                                 binder_size_t index,
1777                                                 binder_size_t start_offset,
1778                                                 binder_size_t *object_offsetp,
1779                                                 binder_size_t num_valid)
1780 {
1781         size_t object_size;
1782         binder_size_t object_offset;
1783         unsigned long buffer_offset;
1784
1785         if (index >= num_valid)
1786                 return NULL;
1787
1788         buffer_offset = start_offset + sizeof(binder_size_t) * index;
1789         if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1790                                           b, buffer_offset,
1791                                           sizeof(object_offset)))
1792                 return NULL;
1793         object_size = binder_get_object(proc, NULL, b, object_offset, object);
1794         if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1795                 return NULL;
1796         if (object_offsetp)
1797                 *object_offsetp = object_offset;
1798
1799         return &object->bbo;
1800 }
1801
1802 /**
1803  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1804  * @proc:               binder_proc owning the buffer
1805  * @b:                  transaction buffer
1806  * @objects_start_offset: offset to start of objects buffer
1807  * @buffer_obj_offset:  offset to binder_buffer_object in which to fix up
1808  * @fixup_offset:       start offset in @buffer to fix up
1809  * @last_obj_offset:    offset to last binder_buffer_object that we fixed
1810  * @last_min_offset:    minimum fixup offset in object at @last_obj_offset
1811  *
1812  * Return:              %true if a fixup in buffer @buffer at offset @offset is
1813  *                      allowed.
1814  *
1815  * For safety reasons, we only allow fixups inside a buffer to happen
1816  * at increasing offsets; additionally, we only allow fixup on the last
1817  * buffer object that was verified, or one of its parents.
1818  *
1819  * Example of what is allowed:
1820  *
1821  * A
1822  *   B (parent = A, offset = 0)
1823  *   C (parent = A, offset = 16)
1824  *     D (parent = C, offset = 0)
1825  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1826  *
1827  * Examples of what is not allowed:
1828  *
1829  * Decreasing offsets within the same parent:
1830  * A
1831  *   C (parent = A, offset = 16)
1832  *   B (parent = A, offset = 0) // decreasing offset within A
1833  *
1834  * Referring to a parent that wasn't the last object or any of its parents:
1835  * A
1836  *   B (parent = A, offset = 0)
1837  *   C (parent = A, offset = 0)
1838  *   C (parent = A, offset = 16)
1839  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1840  */
1841 static bool binder_validate_fixup(struct binder_proc *proc,
1842                                   struct binder_buffer *b,
1843                                   binder_size_t objects_start_offset,
1844                                   binder_size_t buffer_obj_offset,
1845                                   binder_size_t fixup_offset,
1846                                   binder_size_t last_obj_offset,
1847                                   binder_size_t last_min_offset)
1848 {
1849         if (!last_obj_offset) {
1850                 /* Nothing to fix up in */
1851                 return false;
1852         }
1853
1854         while (last_obj_offset != buffer_obj_offset) {
1855                 unsigned long buffer_offset;
1856                 struct binder_object last_object;
1857                 struct binder_buffer_object *last_bbo;
1858                 size_t object_size = binder_get_object(proc, NULL, b,
1859                                                        last_obj_offset,
1860                                                        &last_object);
1861                 if (object_size != sizeof(*last_bbo))
1862                         return false;
1863
1864                 last_bbo = &last_object.bbo;
1865                 /*
1866                  * Safe to retrieve the parent of last_obj, since it
1867                  * was already previously verified by the driver.
1868                  */
1869                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1870                         return false;
1871                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1872                 buffer_offset = objects_start_offset +
1873                         sizeof(binder_size_t) * last_bbo->parent;
1874                 if (binder_alloc_copy_from_buffer(&proc->alloc,
1875                                                   &last_obj_offset,
1876                                                   b, buffer_offset,
1877                                                   sizeof(last_obj_offset)))
1878                         return false;
1879         }
1880         return (fixup_offset >= last_min_offset);
1881 }
1882
1883 /**
1884  * struct binder_task_work_cb - for deferred close
1885  *
1886  * @twork:                callback_head for task work
1887  * @fd:                   fd to close
1888  *
1889  * Structure to pass task work to be handled after
1890  * returning from binder_ioctl() via task_work_add().
1891  */
1892 struct binder_task_work_cb {
1893         struct callback_head twork;
1894         struct file *file;
1895 };
1896
1897 /**
1898  * binder_do_fd_close() - close list of file descriptors
1899  * @twork:      callback head for task work
1900  *
1901  * It is not safe to call ksys_close() during the binder_ioctl()
1902  * function if there is a chance that binder's own file descriptor
1903  * might be closed. This is to meet the requirements for using
1904  * fdget() (see comments for __fget_light()). Therefore use
1905  * task_work_add() to schedule the close operation once we have
1906  * returned from binder_ioctl(). This function is a callback
1907  * for that mechanism and does the actual ksys_close() on the
1908  * given file descriptor.
1909  */
1910 static void binder_do_fd_close(struct callback_head *twork)
1911 {
1912         struct binder_task_work_cb *twcb = container_of(twork,
1913                         struct binder_task_work_cb, twork);
1914
1915         fput(twcb->file);
1916         kfree(twcb);
1917 }
1918
1919 /**
1920  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1921  * @fd:         file-descriptor to close
1922  *
1923  * See comments in binder_do_fd_close(). This function is used to schedule
1924  * a file-descriptor to be closed after returning from binder_ioctl().
1925  */
1926 static void binder_deferred_fd_close(int fd)
1927 {
1928         struct binder_task_work_cb *twcb;
1929
1930         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1931         if (!twcb)
1932                 return;
1933         init_task_work(&twcb->twork, binder_do_fd_close);
1934         twcb->file = file_close_fd(fd);
1935         if (twcb->file) {
1936                 // pin it until binder_do_fd_close(); see comments there
1937                 get_file(twcb->file);
1938                 filp_close(twcb->file, current->files);
1939                 task_work_add(current, &twcb->twork, TWA_RESUME);
1940         } else {
1941                 kfree(twcb);
1942         }
1943 }
1944
1945 static void binder_transaction_buffer_release(struct binder_proc *proc,
1946                                               struct binder_thread *thread,
1947                                               struct binder_buffer *buffer,
1948                                               binder_size_t off_end_offset,
1949                                               bool is_failure)
1950 {
1951         int debug_id = buffer->debug_id;
1952         binder_size_t off_start_offset, buffer_offset;
1953
1954         binder_debug(BINDER_DEBUG_TRANSACTION,
1955                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1956                      proc->pid, buffer->debug_id,
1957                      buffer->data_size, buffer->offsets_size,
1958                      (unsigned long long)off_end_offset);
1959
1960         if (buffer->target_node)
1961                 binder_dec_node(buffer->target_node, 1, 0);
1962
1963         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1964
1965         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1966              buffer_offset += sizeof(binder_size_t)) {
1967                 struct binder_object_header *hdr;
1968                 size_t object_size = 0;
1969                 struct binder_object object;
1970                 binder_size_t object_offset;
1971
1972                 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1973                                                    buffer, buffer_offset,
1974                                                    sizeof(object_offset)))
1975                         object_size = binder_get_object(proc, NULL, buffer,
1976                                                         object_offset, &object);
1977                 if (object_size == 0) {
1978                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1979                                debug_id, (u64)object_offset, buffer->data_size);
1980                         continue;
1981                 }
1982                 hdr = &object.hdr;
1983                 switch (hdr->type) {
1984                 case BINDER_TYPE_BINDER:
1985                 case BINDER_TYPE_WEAK_BINDER: {
1986                         struct flat_binder_object *fp;
1987                         struct binder_node *node;
1988
1989                         fp = to_flat_binder_object(hdr);
1990                         node = binder_get_node(proc, fp->binder);
1991                         if (node == NULL) {
1992                                 pr_err("transaction release %d bad node %016llx\n",
1993                                        debug_id, (u64)fp->binder);
1994                                 break;
1995                         }
1996                         binder_debug(BINDER_DEBUG_TRANSACTION,
1997                                      "        node %d u%016llx\n",
1998                                      node->debug_id, (u64)node->ptr);
1999                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2000                                         0);
2001                         binder_put_node(node);
2002                 } break;
2003                 case BINDER_TYPE_HANDLE:
2004                 case BINDER_TYPE_WEAK_HANDLE: {
2005                         struct flat_binder_object *fp;
2006                         struct binder_ref_data rdata;
2007                         int ret;
2008
2009                         fp = to_flat_binder_object(hdr);
2010                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2011                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2012
2013                         if (ret) {
2014                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2015                                  debug_id, fp->handle, ret);
2016                                 break;
2017                         }
2018                         binder_debug(BINDER_DEBUG_TRANSACTION,
2019                                      "        ref %d desc %d\n",
2020                                      rdata.debug_id, rdata.desc);
2021                 } break;
2022
2023                 case BINDER_TYPE_FD: {
2024                         /*
2025                          * No need to close the file here since user-space
2026                          * closes it for successfully delivered
2027                          * transactions. For transactions that weren't
2028                          * delivered, the new fd was never allocated so
2029                          * there is no need to close and the fput on the
2030                          * file is done when the transaction is torn
2031                          * down.
2032                          */
2033                 } break;
2034                 case BINDER_TYPE_PTR:
2035                         /*
2036                          * Nothing to do here, this will get cleaned up when the
2037                          * transaction buffer gets freed
2038                          */
2039                         break;
2040                 case BINDER_TYPE_FDA: {
2041                         struct binder_fd_array_object *fda;
2042                         struct binder_buffer_object *parent;
2043                         struct binder_object ptr_object;
2044                         binder_size_t fda_offset;
2045                         size_t fd_index;
2046                         binder_size_t fd_buf_size;
2047                         binder_size_t num_valid;
2048
2049                         if (is_failure) {
2050                                 /*
2051                                  * The fd fixups have not been applied so no
2052                                  * fds need to be closed.
2053                                  */
2054                                 continue;
2055                         }
2056
2057                         num_valid = (buffer_offset - off_start_offset) /
2058                                                 sizeof(binder_size_t);
2059                         fda = to_binder_fd_array_object(hdr);
2060                         parent = binder_validate_ptr(proc, buffer, &ptr_object,
2061                                                      fda->parent,
2062                                                      off_start_offset,
2063                                                      NULL,
2064                                                      num_valid);
2065                         if (!parent) {
2066                                 pr_err("transaction release %d bad parent offset\n",
2067                                        debug_id);
2068                                 continue;
2069                         }
2070                         fd_buf_size = sizeof(u32) * fda->num_fds;
2071                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2072                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2073                                        debug_id, (u64)fda->num_fds);
2074                                 continue;
2075                         }
2076                         if (fd_buf_size > parent->length ||
2077                             fda->parent_offset > parent->length - fd_buf_size) {
2078                                 /* No space for all file descriptors here. */
2079                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2080                                        debug_id, (u64)fda->num_fds);
2081                                 continue;
2082                         }
2083                         /*
2084                          * the source data for binder_buffer_object is visible
2085                          * to user-space and the @buffer element is the user
2086                          * pointer to the buffer_object containing the fd_array.
2087                          * Convert the address to an offset relative to
2088                          * the base of the transaction buffer.
2089                          */
2090                         fda_offset = parent->buffer - buffer->user_data +
2091                                 fda->parent_offset;
2092                         for (fd_index = 0; fd_index < fda->num_fds;
2093                              fd_index++) {
2094                                 u32 fd;
2095                                 int err;
2096                                 binder_size_t offset = fda_offset +
2097                                         fd_index * sizeof(fd);
2098
2099                                 err = binder_alloc_copy_from_buffer(
2100                                                 &proc->alloc, &fd, buffer,
2101                                                 offset, sizeof(fd));
2102                                 WARN_ON(err);
2103                                 if (!err) {
2104                                         binder_deferred_fd_close(fd);
2105                                         /*
2106                                          * Need to make sure the thread goes
2107                                          * back to userspace to complete the
2108                                          * deferred close
2109                                          */
2110                                         if (thread)
2111                                                 thread->looper_need_return = true;
2112                                 }
2113                         }
2114                 } break;
2115                 default:
2116                         pr_err("transaction release %d bad object type %x\n",
2117                                 debug_id, hdr->type);
2118                         break;
2119                 }
2120         }
2121 }
2122
2123 /* Clean up all the objects in the buffer */
2124 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2125                                                 struct binder_thread *thread,
2126                                                 struct binder_buffer *buffer,
2127                                                 bool is_failure)
2128 {
2129         binder_size_t off_end_offset;
2130
2131         off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2132         off_end_offset += buffer->offsets_size;
2133
2134         binder_transaction_buffer_release(proc, thread, buffer,
2135                                           off_end_offset, is_failure);
2136 }
2137
2138 static int binder_translate_binder(struct flat_binder_object *fp,
2139                                    struct binder_transaction *t,
2140                                    struct binder_thread *thread)
2141 {
2142         struct binder_node *node;
2143         struct binder_proc *proc = thread->proc;
2144         struct binder_proc *target_proc = t->to_proc;
2145         struct binder_ref_data rdata;
2146         int ret = 0;
2147
2148         node = binder_get_node(proc, fp->binder);
2149         if (!node) {
2150                 node = binder_new_node(proc, fp);
2151                 if (!node)
2152                         return -ENOMEM;
2153         }
2154         if (fp->cookie != node->cookie) {
2155                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2156                                   proc->pid, thread->pid, (u64)fp->binder,
2157                                   node->debug_id, (u64)fp->cookie,
2158                                   (u64)node->cookie);
2159                 ret = -EINVAL;
2160                 goto done;
2161         }
2162         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2163                 ret = -EPERM;
2164                 goto done;
2165         }
2166
2167         ret = binder_inc_ref_for_node(target_proc, node,
2168                         fp->hdr.type == BINDER_TYPE_BINDER,
2169                         &thread->todo, &rdata);
2170         if (ret)
2171                 goto done;
2172
2173         if (fp->hdr.type == BINDER_TYPE_BINDER)
2174                 fp->hdr.type = BINDER_TYPE_HANDLE;
2175         else
2176                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2177         fp->binder = 0;
2178         fp->handle = rdata.desc;
2179         fp->cookie = 0;
2180
2181         trace_binder_transaction_node_to_ref(t, node, &rdata);
2182         binder_debug(BINDER_DEBUG_TRANSACTION,
2183                      "        node %d u%016llx -> ref %d desc %d\n",
2184                      node->debug_id, (u64)node->ptr,
2185                      rdata.debug_id, rdata.desc);
2186 done:
2187         binder_put_node(node);
2188         return ret;
2189 }
2190
2191 static int binder_translate_handle(struct flat_binder_object *fp,
2192                                    struct binder_transaction *t,
2193                                    struct binder_thread *thread)
2194 {
2195         struct binder_proc *proc = thread->proc;
2196         struct binder_proc *target_proc = t->to_proc;
2197         struct binder_node *node;
2198         struct binder_ref_data src_rdata;
2199         int ret = 0;
2200
2201         node = binder_get_node_from_ref(proc, fp->handle,
2202                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2203         if (!node) {
2204                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2205                                   proc->pid, thread->pid, fp->handle);
2206                 return -EINVAL;
2207         }
2208         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2209                 ret = -EPERM;
2210                 goto done;
2211         }
2212
2213         binder_node_lock(node);
2214         if (node->proc == target_proc) {
2215                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2216                         fp->hdr.type = BINDER_TYPE_BINDER;
2217                 else
2218                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2219                 fp->binder = node->ptr;
2220                 fp->cookie = node->cookie;
2221                 if (node->proc)
2222                         binder_inner_proc_lock(node->proc);
2223                 else
2224                         __acquire(&node->proc->inner_lock);
2225                 binder_inc_node_nilocked(node,
2226                                          fp->hdr.type == BINDER_TYPE_BINDER,
2227                                          0, NULL);
2228                 if (node->proc)
2229                         binder_inner_proc_unlock(node->proc);
2230                 else
2231                         __release(&node->proc->inner_lock);
2232                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2233                 binder_debug(BINDER_DEBUG_TRANSACTION,
2234                              "        ref %d desc %d -> node %d u%016llx\n",
2235                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2236                              (u64)node->ptr);
2237                 binder_node_unlock(node);
2238         } else {
2239                 struct binder_ref_data dest_rdata;
2240
2241                 binder_node_unlock(node);
2242                 ret = binder_inc_ref_for_node(target_proc, node,
2243                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2244                                 NULL, &dest_rdata);
2245                 if (ret)
2246                         goto done;
2247
2248                 fp->binder = 0;
2249                 fp->handle = dest_rdata.desc;
2250                 fp->cookie = 0;
2251                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2252                                                     &dest_rdata);
2253                 binder_debug(BINDER_DEBUG_TRANSACTION,
2254                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2255                              src_rdata.debug_id, src_rdata.desc,
2256                              dest_rdata.debug_id, dest_rdata.desc,
2257                              node->debug_id);
2258         }
2259 done:
2260         binder_put_node(node);
2261         return ret;
2262 }
2263
2264 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2265                                struct binder_transaction *t,
2266                                struct binder_thread *thread,
2267                                struct binder_transaction *in_reply_to)
2268 {
2269         struct binder_proc *proc = thread->proc;
2270         struct binder_proc *target_proc = t->to_proc;
2271         struct binder_txn_fd_fixup *fixup;
2272         struct file *file;
2273         int ret = 0;
2274         bool target_allows_fd;
2275
2276         if (in_reply_to)
2277                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2278         else
2279                 target_allows_fd = t->buffer->target_node->accept_fds;
2280         if (!target_allows_fd) {
2281                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2282                                   proc->pid, thread->pid,
2283                                   in_reply_to ? "reply" : "transaction",
2284                                   fd);
2285                 ret = -EPERM;
2286                 goto err_fd_not_accepted;
2287         }
2288
2289         file = fget(fd);
2290         if (!file) {
2291                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2292                                   proc->pid, thread->pid, fd);
2293                 ret = -EBADF;
2294                 goto err_fget;
2295         }
2296         ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2297         if (ret < 0) {
2298                 ret = -EPERM;
2299                 goto err_security;
2300         }
2301
2302         /*
2303          * Add fixup record for this transaction. The allocation
2304          * of the fd in the target needs to be done from a
2305          * target thread.
2306          */
2307         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2308         if (!fixup) {
2309                 ret = -ENOMEM;
2310                 goto err_alloc;
2311         }
2312         fixup->file = file;
2313         fixup->offset = fd_offset;
2314         fixup->target_fd = -1;
2315         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2316         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2317
2318         return ret;
2319
2320 err_alloc:
2321 err_security:
2322         fput(file);
2323 err_fget:
2324 err_fd_not_accepted:
2325         return ret;
2326 }
2327
2328 /**
2329  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2330  * @offset      offset in target buffer to fixup
2331  * @skip_size   bytes to skip in copy (fixup will be written later)
2332  * @fixup_data  data to write at fixup offset
2333  * @node        list node
2334  *
2335  * This is used for the pointer fixup list (pf) which is created and consumed
2336  * during binder_transaction() and is only accessed locally. No
2337  * locking is necessary.
2338  *
2339  * The list is ordered by @offset.
2340  */
2341 struct binder_ptr_fixup {
2342         binder_size_t offset;
2343         size_t skip_size;
2344         binder_uintptr_t fixup_data;
2345         struct list_head node;
2346 };
2347
2348 /**
2349  * struct binder_sg_copy - scatter-gather data to be copied
2350  * @offset              offset in target buffer
2351  * @sender_uaddr        user address in source buffer
2352  * @length              bytes to copy
2353  * @node                list node
2354  *
2355  * This is used for the sg copy list (sgc) which is created and consumed
2356  * during binder_transaction() and is only accessed locally. No
2357  * locking is necessary.
2358  *
2359  * The list is ordered by @offset.
2360  */
2361 struct binder_sg_copy {
2362         binder_size_t offset;
2363         const void __user *sender_uaddr;
2364         size_t length;
2365         struct list_head node;
2366 };
2367
2368 /**
2369  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2370  * @alloc:      binder_alloc associated with @buffer
2371  * @buffer:     binder buffer in target process
2372  * @sgc_head:   list_head of scatter-gather copy list
2373  * @pf_head:    list_head of pointer fixup list
2374  *
2375  * Processes all elements of @sgc_head, applying fixups from @pf_head
2376  * and copying the scatter-gather data from the source process' user
2377  * buffer to the target's buffer. It is expected that the list creation
2378  * and processing all occurs during binder_transaction() so these lists
2379  * are only accessed in local context.
2380  *
2381  * Return: 0=success, else -errno
2382  */
2383 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2384                                          struct binder_buffer *buffer,
2385                                          struct list_head *sgc_head,
2386                                          struct list_head *pf_head)
2387 {
2388         int ret = 0;
2389         struct binder_sg_copy *sgc, *tmpsgc;
2390         struct binder_ptr_fixup *tmppf;
2391         struct binder_ptr_fixup *pf =
2392                 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2393                                          node);
2394
2395         list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2396                 size_t bytes_copied = 0;
2397
2398                 while (bytes_copied < sgc->length) {
2399                         size_t copy_size;
2400                         size_t bytes_left = sgc->length - bytes_copied;
2401                         size_t offset = sgc->offset + bytes_copied;
2402
2403                         /*
2404                          * We copy up to the fixup (pointed to by pf)
2405                          */
2406                         copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2407                                        : bytes_left;
2408                         if (!ret && copy_size)
2409                                 ret = binder_alloc_copy_user_to_buffer(
2410                                                 alloc, buffer,
2411                                                 offset,
2412                                                 sgc->sender_uaddr + bytes_copied,
2413                                                 copy_size);
2414                         bytes_copied += copy_size;
2415                         if (copy_size != bytes_left) {
2416                                 BUG_ON(!pf);
2417                                 /* we stopped at a fixup offset */
2418                                 if (pf->skip_size) {
2419                                         /*
2420                                          * we are just skipping. This is for
2421                                          * BINDER_TYPE_FDA where the translated
2422                                          * fds will be fixed up when we get
2423                                          * to target context.
2424                                          */
2425                                         bytes_copied += pf->skip_size;
2426                                 } else {
2427                                         /* apply the fixup indicated by pf */
2428                                         if (!ret)
2429                                                 ret = binder_alloc_copy_to_buffer(
2430                                                         alloc, buffer,
2431                                                         pf->offset,
2432                                                         &pf->fixup_data,
2433                                                         sizeof(pf->fixup_data));
2434                                         bytes_copied += sizeof(pf->fixup_data);
2435                                 }
2436                                 list_del(&pf->node);
2437                                 kfree(pf);
2438                                 pf = list_first_entry_or_null(pf_head,
2439                                                 struct binder_ptr_fixup, node);
2440                         }
2441                 }
2442                 list_del(&sgc->node);
2443                 kfree(sgc);
2444         }
2445         list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2446                 BUG_ON(pf->skip_size == 0);
2447                 list_del(&pf->node);
2448                 kfree(pf);
2449         }
2450         BUG_ON(!list_empty(sgc_head));
2451
2452         return ret > 0 ? -EINVAL : ret;
2453 }
2454
2455 /**
2456  * binder_cleanup_deferred_txn_lists() - free specified lists
2457  * @sgc_head:   list_head of scatter-gather copy list
2458  * @pf_head:    list_head of pointer fixup list
2459  *
2460  * Called to clean up @sgc_head and @pf_head if there is an
2461  * error.
2462  */
2463 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2464                                               struct list_head *pf_head)
2465 {
2466         struct binder_sg_copy *sgc, *tmpsgc;
2467         struct binder_ptr_fixup *pf, *tmppf;
2468
2469         list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2470                 list_del(&sgc->node);
2471                 kfree(sgc);
2472         }
2473         list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2474                 list_del(&pf->node);
2475                 kfree(pf);
2476         }
2477 }
2478
2479 /**
2480  * binder_defer_copy() - queue a scatter-gather buffer for copy
2481  * @sgc_head:           list_head of scatter-gather copy list
2482  * @offset:             binder buffer offset in target process
2483  * @sender_uaddr:       user address in source process
2484  * @length:             bytes to copy
2485  *
2486  * Specify a scatter-gather block to be copied. The actual copy must
2487  * be deferred until all the needed fixups are identified and queued.
2488  * Then the copy and fixups are done together so un-translated values
2489  * from the source are never visible in the target buffer.
2490  *
2491  * We are guaranteed that repeated calls to this function will have
2492  * monotonically increasing @offset values so the list will naturally
2493  * be ordered.
2494  *
2495  * Return: 0=success, else -errno
2496  */
2497 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2498                              const void __user *sender_uaddr, size_t length)
2499 {
2500         struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2501
2502         if (!bc)
2503                 return -ENOMEM;
2504
2505         bc->offset = offset;
2506         bc->sender_uaddr = sender_uaddr;
2507         bc->length = length;
2508         INIT_LIST_HEAD(&bc->node);
2509
2510         /*
2511          * We are guaranteed that the deferred copies are in-order
2512          * so just add to the tail.
2513          */
2514         list_add_tail(&bc->node, sgc_head);
2515
2516         return 0;
2517 }
2518
2519 /**
2520  * binder_add_fixup() - queue a fixup to be applied to sg copy
2521  * @pf_head:    list_head of binder ptr fixup list
2522  * @offset:     binder buffer offset in target process
2523  * @fixup:      bytes to be copied for fixup
2524  * @skip_size:  bytes to skip when copying (fixup will be applied later)
2525  *
2526  * Add the specified fixup to a list ordered by @offset. When copying
2527  * the scatter-gather buffers, the fixup will be copied instead of
2528  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2529  * will be applied later (in target process context), so we just skip
2530  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2531  * value in @fixup.
2532  *
2533  * This function is called *mostly* in @offset order, but there are
2534  * exceptions. Since out-of-order inserts are relatively uncommon,
2535  * we insert the new element by searching backward from the tail of
2536  * the list.
2537  *
2538  * Return: 0=success, else -errno
2539  */
2540 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2541                             binder_uintptr_t fixup, size_t skip_size)
2542 {
2543         struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2544         struct binder_ptr_fixup *tmppf;
2545
2546         if (!pf)
2547                 return -ENOMEM;
2548
2549         pf->offset = offset;
2550         pf->fixup_data = fixup;
2551         pf->skip_size = skip_size;
2552         INIT_LIST_HEAD(&pf->node);
2553
2554         /* Fixups are *mostly* added in-order, but there are some
2555          * exceptions. Look backwards through list for insertion point.
2556          */
2557         list_for_each_entry_reverse(tmppf, pf_head, node) {
2558                 if (tmppf->offset < pf->offset) {
2559                         list_add(&pf->node, &tmppf->node);
2560                         return 0;
2561                 }
2562         }
2563         /*
2564          * if we get here, then the new offset is the lowest so
2565          * insert at the head
2566          */
2567         list_add(&pf->node, pf_head);
2568         return 0;
2569 }
2570
2571 static int binder_translate_fd_array(struct list_head *pf_head,
2572                                      struct binder_fd_array_object *fda,
2573                                      const void __user *sender_ubuffer,
2574                                      struct binder_buffer_object *parent,
2575                                      struct binder_buffer_object *sender_uparent,
2576                                      struct binder_transaction *t,
2577                                      struct binder_thread *thread,
2578                                      struct binder_transaction *in_reply_to)
2579 {
2580         binder_size_t fdi, fd_buf_size;
2581         binder_size_t fda_offset;
2582         const void __user *sender_ufda_base;
2583         struct binder_proc *proc = thread->proc;
2584         int ret;
2585
2586         if (fda->num_fds == 0)
2587                 return 0;
2588
2589         fd_buf_size = sizeof(u32) * fda->num_fds;
2590         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2591                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2592                                   proc->pid, thread->pid, (u64)fda->num_fds);
2593                 return -EINVAL;
2594         }
2595         if (fd_buf_size > parent->length ||
2596             fda->parent_offset > parent->length - fd_buf_size) {
2597                 /* No space for all file descriptors here. */
2598                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2599                                   proc->pid, thread->pid, (u64)fda->num_fds);
2600                 return -EINVAL;
2601         }
2602         /*
2603          * the source data for binder_buffer_object is visible
2604          * to user-space and the @buffer element is the user
2605          * pointer to the buffer_object containing the fd_array.
2606          * Convert the address to an offset relative to
2607          * the base of the transaction buffer.
2608          */
2609         fda_offset = parent->buffer - t->buffer->user_data +
2610                 fda->parent_offset;
2611         sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2612                                 fda->parent_offset;
2613
2614         if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2615             !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2616                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2617                                   proc->pid, thread->pid);
2618                 return -EINVAL;
2619         }
2620         ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2621         if (ret)
2622                 return ret;
2623
2624         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2625                 u32 fd;
2626                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2627                 binder_size_t sender_uoffset = fdi * sizeof(fd);
2628
2629                 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2630                 if (!ret)
2631                         ret = binder_translate_fd(fd, offset, t, thread,
2632                                                   in_reply_to);
2633                 if (ret)
2634                         return ret > 0 ? -EINVAL : ret;
2635         }
2636         return 0;
2637 }
2638
2639 static int binder_fixup_parent(struct list_head *pf_head,
2640                                struct binder_transaction *t,
2641                                struct binder_thread *thread,
2642                                struct binder_buffer_object *bp,
2643                                binder_size_t off_start_offset,
2644                                binder_size_t num_valid,
2645                                binder_size_t last_fixup_obj_off,
2646                                binder_size_t last_fixup_min_off)
2647 {
2648         struct binder_buffer_object *parent;
2649         struct binder_buffer *b = t->buffer;
2650         struct binder_proc *proc = thread->proc;
2651         struct binder_proc *target_proc = t->to_proc;
2652         struct binder_object object;
2653         binder_size_t buffer_offset;
2654         binder_size_t parent_offset;
2655
2656         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2657                 return 0;
2658
2659         parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2660                                      off_start_offset, &parent_offset,
2661                                      num_valid);
2662         if (!parent) {
2663                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2664                                   proc->pid, thread->pid);
2665                 return -EINVAL;
2666         }
2667
2668         if (!binder_validate_fixup(target_proc, b, off_start_offset,
2669                                    parent_offset, bp->parent_offset,
2670                                    last_fixup_obj_off,
2671                                    last_fixup_min_off)) {
2672                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2673                                   proc->pid, thread->pid);
2674                 return -EINVAL;
2675         }
2676
2677         if (parent->length < sizeof(binder_uintptr_t) ||
2678             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2679                 /* No space for a pointer here! */
2680                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2681                                   proc->pid, thread->pid);
2682                 return -EINVAL;
2683         }
2684
2685         buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2686
2687         return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2688 }
2689
2690 /**
2691  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2692  * @t1: the pending async txn in the frozen process
2693  * @t2: the new async txn to supersede the outdated pending one
2694  *
2695  * Return:  true if t2 can supersede t1
2696  *          false if t2 can not supersede t1
2697  */
2698 static bool binder_can_update_transaction(struct binder_transaction *t1,
2699                                           struct binder_transaction *t2)
2700 {
2701         if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2702             (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2703                 return false;
2704         if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2705             t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2706             t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2707             t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2708                 return true;
2709         return false;
2710 }
2711
2712 /**
2713  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2714  * @t:           new async transaction
2715  * @target_list: list to find outdated transaction
2716  *
2717  * Return: the outdated transaction if found
2718  *         NULL if no outdated transacton can be found
2719  *
2720  * Requires the proc->inner_lock to be held.
2721  */
2722 static struct binder_transaction *
2723 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2724                                          struct list_head *target_list)
2725 {
2726         struct binder_work *w;
2727
2728         list_for_each_entry(w, target_list, entry) {
2729                 struct binder_transaction *t_queued;
2730
2731                 if (w->type != BINDER_WORK_TRANSACTION)
2732                         continue;
2733                 t_queued = container_of(w, struct binder_transaction, work);
2734                 if (binder_can_update_transaction(t_queued, t))
2735                         return t_queued;
2736         }
2737         return NULL;
2738 }
2739
2740 /**
2741  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2742  * @t:          transaction to send
2743  * @proc:       process to send the transaction to
2744  * @thread:     thread in @proc to send the transaction to (may be NULL)
2745  *
2746  * This function queues a transaction to the specified process. It will try
2747  * to find a thread in the target process to handle the transaction and
2748  * wake it up. If no thread is found, the work is queued to the proc
2749  * waitqueue.
2750  *
2751  * If the @thread parameter is not NULL, the transaction is always queued
2752  * to the waitlist of that specific thread.
2753  *
2754  * Return:      0 if the transaction was successfully queued
2755  *              BR_DEAD_REPLY if the target process or thread is dead
2756  *              BR_FROZEN_REPLY if the target process or thread is frozen and
2757  *                      the sync transaction was rejected
2758  *              BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2759  *              and the async transaction was successfully queued
2760  */
2761 static int binder_proc_transaction(struct binder_transaction *t,
2762                                     struct binder_proc *proc,
2763                                     struct binder_thread *thread)
2764 {
2765         struct binder_node *node = t->buffer->target_node;
2766         bool oneway = !!(t->flags & TF_ONE_WAY);
2767         bool pending_async = false;
2768         struct binder_transaction *t_outdated = NULL;
2769         bool frozen = false;
2770
2771         BUG_ON(!node);
2772         binder_node_lock(node);
2773         if (oneway) {
2774                 BUG_ON(thread);
2775                 if (node->has_async_transaction)
2776                         pending_async = true;
2777                 else
2778                         node->has_async_transaction = true;
2779         }
2780
2781         binder_inner_proc_lock(proc);
2782         if (proc->is_frozen) {
2783                 frozen = true;
2784                 proc->sync_recv |= !oneway;
2785                 proc->async_recv |= oneway;
2786         }
2787
2788         if ((frozen && !oneway) || proc->is_dead ||
2789                         (thread && thread->is_dead)) {
2790                 binder_inner_proc_unlock(proc);
2791                 binder_node_unlock(node);
2792                 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2793         }
2794
2795         if (!thread && !pending_async)
2796                 thread = binder_select_thread_ilocked(proc);
2797
2798         if (thread) {
2799                 binder_enqueue_thread_work_ilocked(thread, &t->work);
2800         } else if (!pending_async) {
2801                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2802         } else {
2803                 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2804                         t_outdated = binder_find_outdated_transaction_ilocked(t,
2805                                                                               &node->async_todo);
2806                         if (t_outdated) {
2807                                 binder_debug(BINDER_DEBUG_TRANSACTION,
2808                                              "txn %d supersedes %d\n",
2809                                              t->debug_id, t_outdated->debug_id);
2810                                 list_del_init(&t_outdated->work.entry);
2811                                 proc->outstanding_txns--;
2812                         }
2813                 }
2814                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2815         }
2816
2817         if (!pending_async)
2818                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2819
2820         proc->outstanding_txns++;
2821         binder_inner_proc_unlock(proc);
2822         binder_node_unlock(node);
2823
2824         /*
2825          * To reduce potential contention, free the outdated transaction and
2826          * buffer after releasing the locks.
2827          */
2828         if (t_outdated) {
2829                 struct binder_buffer *buffer = t_outdated->buffer;
2830
2831                 t_outdated->buffer = NULL;
2832                 buffer->transaction = NULL;
2833                 trace_binder_transaction_update_buffer_release(buffer);
2834                 binder_release_entire_buffer(proc, NULL, buffer, false);
2835                 binder_alloc_free_buf(&proc->alloc, buffer);
2836                 kfree(t_outdated);
2837                 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2838         }
2839
2840         if (oneway && frozen)
2841                 return BR_TRANSACTION_PENDING_FROZEN;
2842
2843         return 0;
2844 }
2845
2846 /**
2847  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2848  * @node:         struct binder_node for which to get refs
2849  * @procp:        returns @node->proc if valid
2850  * @error:        if no @procp then returns BR_DEAD_REPLY
2851  *
2852  * User-space normally keeps the node alive when creating a transaction
2853  * since it has a reference to the target. The local strong ref keeps it
2854  * alive if the sending process dies before the target process processes
2855  * the transaction. If the source process is malicious or has a reference
2856  * counting bug, relying on the local strong ref can fail.
2857  *
2858  * Since user-space can cause the local strong ref to go away, we also take
2859  * a tmpref on the node to ensure it survives while we are constructing
2860  * the transaction. We also need a tmpref on the proc while we are
2861  * constructing the transaction, so we take that here as well.
2862  *
2863  * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2864  * Also sets @procp if valid. If the @node->proc is NULL indicating that the
2865  * target proc has died, @error is set to BR_DEAD_REPLY.
2866  */
2867 static struct binder_node *binder_get_node_refs_for_txn(
2868                 struct binder_node *node,
2869                 struct binder_proc **procp,
2870                 uint32_t *error)
2871 {
2872         struct binder_node *target_node = NULL;
2873
2874         binder_node_inner_lock(node);
2875         if (node->proc) {
2876                 target_node = node;
2877                 binder_inc_node_nilocked(node, 1, 0, NULL);
2878                 binder_inc_node_tmpref_ilocked(node);
2879                 node->proc->tmp_ref++;
2880                 *procp = node->proc;
2881         } else
2882                 *error = BR_DEAD_REPLY;
2883         binder_node_inner_unlock(node);
2884
2885         return target_node;
2886 }
2887
2888 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2889                                       uint32_t command, int32_t param)
2890 {
2891         struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2892
2893         if (!from) {
2894                 /* annotation for sparse */
2895                 __release(&from->proc->inner_lock);
2896                 return;
2897         }
2898
2899         /* don't override existing errors */
2900         if (from->ee.command == BR_OK)
2901                 binder_set_extended_error(&from->ee, id, command, param);
2902         binder_inner_proc_unlock(from->proc);
2903         binder_thread_dec_tmpref(from);
2904 }
2905
2906 static void binder_transaction(struct binder_proc *proc,
2907                                struct binder_thread *thread,
2908                                struct binder_transaction_data *tr, int reply,
2909                                binder_size_t extra_buffers_size)
2910 {
2911         int ret;
2912         struct binder_transaction *t;
2913         struct binder_work *w;
2914         struct binder_work *tcomplete;
2915         binder_size_t buffer_offset = 0;
2916         binder_size_t off_start_offset, off_end_offset;
2917         binder_size_t off_min;
2918         binder_size_t sg_buf_offset, sg_buf_end_offset;
2919         binder_size_t user_offset = 0;
2920         struct binder_proc *target_proc = NULL;
2921         struct binder_thread *target_thread = NULL;
2922         struct binder_node *target_node = NULL;
2923         struct binder_transaction *in_reply_to = NULL;
2924         struct binder_transaction_log_entry *e;
2925         uint32_t return_error = 0;
2926         uint32_t return_error_param = 0;
2927         uint32_t return_error_line = 0;
2928         binder_size_t last_fixup_obj_off = 0;
2929         binder_size_t last_fixup_min_off = 0;
2930         struct binder_context *context = proc->context;
2931         int t_debug_id = atomic_inc_return(&binder_last_id);
2932         ktime_t t_start_time = ktime_get();
2933         char *secctx = NULL;
2934         u32 secctx_sz = 0;
2935         struct list_head sgc_head;
2936         struct list_head pf_head;
2937         const void __user *user_buffer = (const void __user *)
2938                                 (uintptr_t)tr->data.ptr.buffer;
2939         INIT_LIST_HEAD(&sgc_head);
2940         INIT_LIST_HEAD(&pf_head);
2941
2942         e = binder_transaction_log_add(&binder_transaction_log);
2943         e->debug_id = t_debug_id;
2944         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2945         e->from_proc = proc->pid;
2946         e->from_thread = thread->pid;
2947         e->target_handle = tr->target.handle;
2948         e->data_size = tr->data_size;
2949         e->offsets_size = tr->offsets_size;
2950         strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2951
2952         binder_inner_proc_lock(proc);
2953         binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2954         binder_inner_proc_unlock(proc);
2955
2956         if (reply) {
2957                 binder_inner_proc_lock(proc);
2958                 in_reply_to = thread->transaction_stack;
2959                 if (in_reply_to == NULL) {
2960                         binder_inner_proc_unlock(proc);
2961                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2962                                           proc->pid, thread->pid);
2963                         return_error = BR_FAILED_REPLY;
2964                         return_error_param = -EPROTO;
2965                         return_error_line = __LINE__;
2966                         goto err_empty_call_stack;
2967                 }
2968                 if (in_reply_to->to_thread != thread) {
2969                         spin_lock(&in_reply_to->lock);
2970                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2971                                 proc->pid, thread->pid, in_reply_to->debug_id,
2972                                 in_reply_to->to_proc ?
2973                                 in_reply_to->to_proc->pid : 0,
2974                                 in_reply_to->to_thread ?
2975                                 in_reply_to->to_thread->pid : 0);
2976                         spin_unlock(&in_reply_to->lock);
2977                         binder_inner_proc_unlock(proc);
2978                         return_error = BR_FAILED_REPLY;
2979                         return_error_param = -EPROTO;
2980                         return_error_line = __LINE__;
2981                         in_reply_to = NULL;
2982                         goto err_bad_call_stack;
2983                 }
2984                 thread->transaction_stack = in_reply_to->to_parent;
2985                 binder_inner_proc_unlock(proc);
2986                 binder_set_nice(in_reply_to->saved_priority);
2987                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2988                 if (target_thread == NULL) {
2989                         /* annotation for sparse */
2990                         __release(&target_thread->proc->inner_lock);
2991                         binder_txn_error("%d:%d reply target not found\n",
2992                                 thread->pid, proc->pid);
2993                         return_error = BR_DEAD_REPLY;
2994                         return_error_line = __LINE__;
2995                         goto err_dead_binder;
2996                 }
2997                 if (target_thread->transaction_stack != in_reply_to) {
2998                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2999                                 proc->pid, thread->pid,
3000                                 target_thread->transaction_stack ?
3001                                 target_thread->transaction_stack->debug_id : 0,
3002                                 in_reply_to->debug_id);
3003                         binder_inner_proc_unlock(target_thread->proc);
3004                         return_error = BR_FAILED_REPLY;
3005                         return_error_param = -EPROTO;
3006                         return_error_line = __LINE__;
3007                         in_reply_to = NULL;
3008                         target_thread = NULL;
3009                         goto err_dead_binder;
3010                 }
3011                 target_proc = target_thread->proc;
3012                 target_proc->tmp_ref++;
3013                 binder_inner_proc_unlock(target_thread->proc);
3014         } else {
3015                 if (tr->target.handle) {
3016                         struct binder_ref *ref;
3017
3018                         /*
3019                          * There must already be a strong ref
3020                          * on this node. If so, do a strong
3021                          * increment on the node to ensure it
3022                          * stays alive until the transaction is
3023                          * done.
3024                          */
3025                         binder_proc_lock(proc);
3026                         ref = binder_get_ref_olocked(proc, tr->target.handle,
3027                                                      true);
3028                         if (ref) {
3029                                 target_node = binder_get_node_refs_for_txn(
3030                                                 ref->node, &target_proc,
3031                                                 &return_error);
3032                         } else {
3033                                 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3034                                                   proc->pid, thread->pid, tr->target.handle);
3035                                 return_error = BR_FAILED_REPLY;
3036                         }
3037                         binder_proc_unlock(proc);
3038                 } else {
3039                         mutex_lock(&context->context_mgr_node_lock);
3040                         target_node = context->binder_context_mgr_node;
3041                         if (target_node)
3042                                 target_node = binder_get_node_refs_for_txn(
3043                                                 target_node, &target_proc,
3044                                                 &return_error);
3045                         else
3046                                 return_error = BR_DEAD_REPLY;
3047                         mutex_unlock(&context->context_mgr_node_lock);
3048                         if (target_node && target_proc->pid == proc->pid) {
3049                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3050                                                   proc->pid, thread->pid);
3051                                 return_error = BR_FAILED_REPLY;
3052                                 return_error_param = -EINVAL;
3053                                 return_error_line = __LINE__;
3054                                 goto err_invalid_target_handle;
3055                         }
3056                 }
3057                 if (!target_node) {
3058                         binder_txn_error("%d:%d cannot find target node\n",
3059                                 thread->pid, proc->pid);
3060                         /*
3061                          * return_error is set above
3062                          */
3063                         return_error_param = -EINVAL;
3064                         return_error_line = __LINE__;
3065                         goto err_dead_binder;
3066                 }
3067                 e->to_node = target_node->debug_id;
3068                 if (WARN_ON(proc == target_proc)) {
3069                         binder_txn_error("%d:%d self transactions not allowed\n",
3070                                 thread->pid, proc->pid);
3071                         return_error = BR_FAILED_REPLY;
3072                         return_error_param = -EINVAL;
3073                         return_error_line = __LINE__;
3074                         goto err_invalid_target_handle;
3075                 }
3076                 if (security_binder_transaction(proc->cred,
3077                                                 target_proc->cred) < 0) {
3078                         binder_txn_error("%d:%d transaction credentials failed\n",
3079                                 thread->pid, proc->pid);
3080                         return_error = BR_FAILED_REPLY;
3081                         return_error_param = -EPERM;
3082                         return_error_line = __LINE__;
3083                         goto err_invalid_target_handle;
3084                 }
3085                 binder_inner_proc_lock(proc);
3086
3087                 w = list_first_entry_or_null(&thread->todo,
3088                                              struct binder_work, entry);
3089                 if (!(tr->flags & TF_ONE_WAY) && w &&
3090                     w->type == BINDER_WORK_TRANSACTION) {
3091                         /*
3092                          * Do not allow new outgoing transaction from a
3093                          * thread that has a transaction at the head of
3094                          * its todo list. Only need to check the head
3095                          * because binder_select_thread_ilocked picks a
3096                          * thread from proc->waiting_threads to enqueue
3097                          * the transaction, and nothing is queued to the
3098                          * todo list while the thread is on waiting_threads.
3099                          */
3100                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3101                                           proc->pid, thread->pid);
3102                         binder_inner_proc_unlock(proc);
3103                         return_error = BR_FAILED_REPLY;
3104                         return_error_param = -EPROTO;
3105                         return_error_line = __LINE__;
3106                         goto err_bad_todo_list;
3107                 }
3108
3109                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3110                         struct binder_transaction *tmp;
3111
3112                         tmp = thread->transaction_stack;
3113                         if (tmp->to_thread != thread) {
3114                                 spin_lock(&tmp->lock);
3115                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3116                                         proc->pid, thread->pid, tmp->debug_id,
3117                                         tmp->to_proc ? tmp->to_proc->pid : 0,
3118                                         tmp->to_thread ?
3119                                         tmp->to_thread->pid : 0);
3120                                 spin_unlock(&tmp->lock);
3121                                 binder_inner_proc_unlock(proc);
3122                                 return_error = BR_FAILED_REPLY;
3123                                 return_error_param = -EPROTO;
3124                                 return_error_line = __LINE__;
3125                                 goto err_bad_call_stack;
3126                         }
3127                         while (tmp) {
3128                                 struct binder_thread *from;
3129
3130                                 spin_lock(&tmp->lock);
3131                                 from = tmp->from;
3132                                 if (from && from->proc == target_proc) {
3133                                         atomic_inc(&from->tmp_ref);
3134                                         target_thread = from;
3135                                         spin_unlock(&tmp->lock);
3136                                         break;
3137                                 }
3138                                 spin_unlock(&tmp->lock);
3139                                 tmp = tmp->from_parent;
3140                         }
3141                 }
3142                 binder_inner_proc_unlock(proc);
3143         }
3144         if (target_thread)
3145                 e->to_thread = target_thread->pid;
3146         e->to_proc = target_proc->pid;
3147
3148         /* TODO: reuse incoming transaction for reply */
3149         t = kzalloc(sizeof(*t), GFP_KERNEL);
3150         if (t == NULL) {
3151                 binder_txn_error("%d:%d cannot allocate transaction\n",
3152                         thread->pid, proc->pid);
3153                 return_error = BR_FAILED_REPLY;
3154                 return_error_param = -ENOMEM;
3155                 return_error_line = __LINE__;
3156                 goto err_alloc_t_failed;
3157         }
3158         INIT_LIST_HEAD(&t->fd_fixups);
3159         binder_stats_created(BINDER_STAT_TRANSACTION);
3160         spin_lock_init(&t->lock);
3161
3162         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3163         if (tcomplete == NULL) {
3164                 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3165                         thread->pid, proc->pid);
3166                 return_error = BR_FAILED_REPLY;
3167                 return_error_param = -ENOMEM;
3168                 return_error_line = __LINE__;
3169                 goto err_alloc_tcomplete_failed;
3170         }
3171         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3172
3173         t->debug_id = t_debug_id;
3174         t->start_time = t_start_time;
3175
3176         if (reply)
3177                 binder_debug(BINDER_DEBUG_TRANSACTION,
3178                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3179                              proc->pid, thread->pid, t->debug_id,
3180                              target_proc->pid, target_thread->pid,
3181                              (u64)tr->data.ptr.buffer,
3182                              (u64)tr->data.ptr.offsets,
3183                              (u64)tr->data_size, (u64)tr->offsets_size,
3184                              (u64)extra_buffers_size);
3185         else
3186                 binder_debug(BINDER_DEBUG_TRANSACTION,
3187                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3188                              proc->pid, thread->pid, t->debug_id,
3189                              target_proc->pid, target_node->debug_id,
3190                              (u64)tr->data.ptr.buffer,
3191                              (u64)tr->data.ptr.offsets,
3192                              (u64)tr->data_size, (u64)tr->offsets_size,
3193                              (u64)extra_buffers_size);
3194
3195         if (!reply && !(tr->flags & TF_ONE_WAY))
3196                 t->from = thread;
3197         else
3198                 t->from = NULL;
3199         t->from_pid = proc->pid;
3200         t->from_tid = thread->pid;
3201         t->sender_euid = task_euid(proc->tsk);
3202         t->to_proc = target_proc;
3203         t->to_thread = target_thread;
3204         t->code = tr->code;
3205         t->flags = tr->flags;
3206         t->priority = task_nice(current);
3207
3208         if (target_node && target_node->txn_security_ctx) {
3209                 u32 secid;
3210                 size_t added_size;
3211
3212                 security_cred_getsecid(proc->cred, &secid);
3213                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3214                 if (ret) {
3215                         binder_txn_error("%d:%d failed to get security context\n",
3216                                 thread->pid, proc->pid);
3217                         return_error = BR_FAILED_REPLY;
3218                         return_error_param = ret;
3219                         return_error_line = __LINE__;
3220                         goto err_get_secctx_failed;
3221                 }
3222                 added_size = ALIGN(secctx_sz, sizeof(u64));
3223                 extra_buffers_size += added_size;
3224                 if (extra_buffers_size < added_size) {
3225                         binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3226                                 thread->pid, proc->pid);
3227                         return_error = BR_FAILED_REPLY;
3228                         return_error_param = -EINVAL;
3229                         return_error_line = __LINE__;
3230                         goto err_bad_extra_size;
3231                 }
3232         }
3233
3234         trace_binder_transaction(reply, t, target_node);
3235
3236         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3237                 tr->offsets_size, extra_buffers_size,
3238                 !reply && (t->flags & TF_ONE_WAY));
3239         if (IS_ERR(t->buffer)) {
3240                 char *s;
3241
3242                 ret = PTR_ERR(t->buffer);
3243                 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3244                         : (ret == -ENOSPC) ? ": no space left"
3245                         : (ret == -ENOMEM) ? ": memory allocation failed"
3246                         : "";
3247                 binder_txn_error("cannot allocate buffer%s", s);
3248
3249                 return_error_param = PTR_ERR(t->buffer);
3250                 return_error = return_error_param == -ESRCH ?
3251                         BR_DEAD_REPLY : BR_FAILED_REPLY;
3252                 return_error_line = __LINE__;
3253                 t->buffer = NULL;
3254                 goto err_binder_alloc_buf_failed;
3255         }
3256         if (secctx) {
3257                 int err;
3258                 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3259                                     ALIGN(tr->offsets_size, sizeof(void *)) +
3260                                     ALIGN(extra_buffers_size, sizeof(void *)) -
3261                                     ALIGN(secctx_sz, sizeof(u64));
3262
3263                 t->security_ctx = t->buffer->user_data + buf_offset;
3264                 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3265                                                   t->buffer, buf_offset,
3266                                                   secctx, secctx_sz);
3267                 if (err) {
3268                         t->security_ctx = 0;
3269                         WARN_ON(1);
3270                 }
3271                 security_release_secctx(secctx, secctx_sz);
3272                 secctx = NULL;
3273         }
3274         t->buffer->debug_id = t->debug_id;
3275         t->buffer->transaction = t;
3276         t->buffer->target_node = target_node;
3277         t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3278         trace_binder_transaction_alloc_buf(t->buffer);
3279
3280         if (binder_alloc_copy_user_to_buffer(
3281                                 &target_proc->alloc,
3282                                 t->buffer,
3283                                 ALIGN(tr->data_size, sizeof(void *)),
3284                                 (const void __user *)
3285                                         (uintptr_t)tr->data.ptr.offsets,
3286                                 tr->offsets_size)) {
3287                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3288                                 proc->pid, thread->pid);
3289                 return_error = BR_FAILED_REPLY;
3290                 return_error_param = -EFAULT;
3291                 return_error_line = __LINE__;
3292                 goto err_copy_data_failed;
3293         }
3294         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3295                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3296                                 proc->pid, thread->pid, (u64)tr->offsets_size);
3297                 return_error = BR_FAILED_REPLY;
3298                 return_error_param = -EINVAL;
3299                 return_error_line = __LINE__;
3300                 goto err_bad_offset;
3301         }
3302         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3303                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3304                                   proc->pid, thread->pid,
3305                                   (u64)extra_buffers_size);
3306                 return_error = BR_FAILED_REPLY;
3307                 return_error_param = -EINVAL;
3308                 return_error_line = __LINE__;
3309                 goto err_bad_offset;
3310         }
3311         off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3312         buffer_offset = off_start_offset;
3313         off_end_offset = off_start_offset + tr->offsets_size;
3314         sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3315         sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3316                 ALIGN(secctx_sz, sizeof(u64));
3317         off_min = 0;
3318         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3319              buffer_offset += sizeof(binder_size_t)) {
3320                 struct binder_object_header *hdr;
3321                 size_t object_size;
3322                 struct binder_object object;
3323                 binder_size_t object_offset;
3324                 binder_size_t copy_size;
3325
3326                 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3327                                                   &object_offset,
3328                                                   t->buffer,
3329                                                   buffer_offset,
3330                                                   sizeof(object_offset))) {
3331                         binder_txn_error("%d:%d copy offset from buffer failed\n",
3332                                 thread->pid, proc->pid);
3333                         return_error = BR_FAILED_REPLY;
3334                         return_error_param = -EINVAL;
3335                         return_error_line = __LINE__;
3336                         goto err_bad_offset;
3337                 }
3338
3339                 /*
3340                  * Copy the source user buffer up to the next object
3341                  * that will be processed.
3342                  */
3343                 copy_size = object_offset - user_offset;
3344                 if (copy_size && (user_offset > object_offset ||
3345                                 binder_alloc_copy_user_to_buffer(
3346                                         &target_proc->alloc,
3347                                         t->buffer, user_offset,
3348                                         user_buffer + user_offset,
3349                                         copy_size))) {
3350                         binder_user_error("%d:%d got transaction with invalid data ptr\n",
3351                                         proc->pid, thread->pid);
3352                         return_error = BR_FAILED_REPLY;
3353                         return_error_param = -EFAULT;
3354                         return_error_line = __LINE__;
3355                         goto err_copy_data_failed;
3356                 }
3357                 object_size = binder_get_object(target_proc, user_buffer,
3358                                 t->buffer, object_offset, &object);
3359                 if (object_size == 0 || object_offset < off_min) {
3360                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3361                                           proc->pid, thread->pid,
3362                                           (u64)object_offset,
3363                                           (u64)off_min,
3364                                           (u64)t->buffer->data_size);
3365                         return_error = BR_FAILED_REPLY;
3366                         return_error_param = -EINVAL;
3367                         return_error_line = __LINE__;
3368                         goto err_bad_offset;
3369                 }
3370                 /*
3371                  * Set offset to the next buffer fragment to be
3372                  * copied
3373                  */
3374                 user_offset = object_offset + object_size;
3375
3376                 hdr = &object.hdr;
3377                 off_min = object_offset + object_size;
3378                 switch (hdr->type) {
3379                 case BINDER_TYPE_BINDER:
3380                 case BINDER_TYPE_WEAK_BINDER: {
3381                         struct flat_binder_object *fp;
3382
3383                         fp = to_flat_binder_object(hdr);
3384                         ret = binder_translate_binder(fp, t, thread);
3385
3386                         if (ret < 0 ||
3387                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3388                                                         t->buffer,
3389                                                         object_offset,
3390                                                         fp, sizeof(*fp))) {
3391                                 binder_txn_error("%d:%d translate binder failed\n",
3392                                         thread->pid, proc->pid);
3393                                 return_error = BR_FAILED_REPLY;
3394                                 return_error_param = ret;
3395                                 return_error_line = __LINE__;
3396                                 goto err_translate_failed;
3397                         }
3398                 } break;
3399                 case BINDER_TYPE_HANDLE:
3400                 case BINDER_TYPE_WEAK_HANDLE: {
3401                         struct flat_binder_object *fp;
3402
3403                         fp = to_flat_binder_object(hdr);
3404                         ret = binder_translate_handle(fp, t, thread);
3405                         if (ret < 0 ||
3406                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3407                                                         t->buffer,
3408                                                         object_offset,
3409                                                         fp, sizeof(*fp))) {
3410                                 binder_txn_error("%d:%d translate handle failed\n",
3411                                         thread->pid, proc->pid);
3412                                 return_error = BR_FAILED_REPLY;
3413                                 return_error_param = ret;
3414                                 return_error_line = __LINE__;
3415                                 goto err_translate_failed;
3416                         }
3417                 } break;
3418
3419                 case BINDER_TYPE_FD: {
3420                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
3421                         binder_size_t fd_offset = object_offset +
3422                                 (uintptr_t)&fp->fd - (uintptr_t)fp;
3423                         int ret = binder_translate_fd(fp->fd, fd_offset, t,
3424                                                       thread, in_reply_to);
3425
3426                         fp->pad_binder = 0;
3427                         if (ret < 0 ||
3428                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3429                                                         t->buffer,
3430                                                         object_offset,
3431                                                         fp, sizeof(*fp))) {
3432                                 binder_txn_error("%d:%d translate fd failed\n",
3433                                         thread->pid, proc->pid);
3434                                 return_error = BR_FAILED_REPLY;
3435                                 return_error_param = ret;
3436                                 return_error_line = __LINE__;
3437                                 goto err_translate_failed;
3438                         }
3439                 } break;
3440                 case BINDER_TYPE_FDA: {
3441                         struct binder_object ptr_object;
3442                         binder_size_t parent_offset;
3443                         struct binder_object user_object;
3444                         size_t user_parent_size;
3445                         struct binder_fd_array_object *fda =
3446                                 to_binder_fd_array_object(hdr);
3447                         size_t num_valid = (buffer_offset - off_start_offset) /
3448                                                 sizeof(binder_size_t);
3449                         struct binder_buffer_object *parent =
3450                                 binder_validate_ptr(target_proc, t->buffer,
3451                                                     &ptr_object, fda->parent,
3452                                                     off_start_offset,
3453                                                     &parent_offset,
3454                                                     num_valid);
3455                         if (!parent) {
3456                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3457                                                   proc->pid, thread->pid);
3458                                 return_error = BR_FAILED_REPLY;
3459                                 return_error_param = -EINVAL;
3460                                 return_error_line = __LINE__;
3461                                 goto err_bad_parent;
3462                         }
3463                         if (!binder_validate_fixup(target_proc, t->buffer,
3464                                                    off_start_offset,
3465                                                    parent_offset,
3466                                                    fda->parent_offset,
3467                                                    last_fixup_obj_off,
3468                                                    last_fixup_min_off)) {
3469                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3470                                                   proc->pid, thread->pid);
3471                                 return_error = BR_FAILED_REPLY;
3472                                 return_error_param = -EINVAL;
3473                                 return_error_line = __LINE__;
3474                                 goto err_bad_parent;
3475                         }
3476                         /*
3477                          * We need to read the user version of the parent
3478                          * object to get the original user offset
3479                          */
3480                         user_parent_size =
3481                                 binder_get_object(proc, user_buffer, t->buffer,
3482                                                   parent_offset, &user_object);
3483                         if (user_parent_size != sizeof(user_object.bbo)) {
3484                                 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3485                                                   proc->pid, thread->pid,
3486                                                   user_parent_size,
3487                                                   sizeof(user_object.bbo));
3488                                 return_error = BR_FAILED_REPLY;
3489                                 return_error_param = -EINVAL;
3490                                 return_error_line = __LINE__;
3491                                 goto err_bad_parent;
3492                         }
3493                         ret = binder_translate_fd_array(&pf_head, fda,
3494                                                         user_buffer, parent,
3495                                                         &user_object.bbo, t,
3496                                                         thread, in_reply_to);
3497                         if (!ret)
3498                                 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3499                                                                   t->buffer,
3500                                                                   object_offset,
3501                                                                   fda, sizeof(*fda));
3502                         if (ret) {
3503                                 binder_txn_error("%d:%d translate fd array failed\n",
3504                                         thread->pid, proc->pid);
3505                                 return_error = BR_FAILED_REPLY;
3506                                 return_error_param = ret > 0 ? -EINVAL : ret;
3507                                 return_error_line = __LINE__;
3508                                 goto err_translate_failed;
3509                         }
3510                         last_fixup_obj_off = parent_offset;
3511                         last_fixup_min_off =
3512                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3513                 } break;
3514                 case BINDER_TYPE_PTR: {
3515                         struct binder_buffer_object *bp =
3516                                 to_binder_buffer_object(hdr);
3517                         size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3518                         size_t num_valid;
3519
3520                         if (bp->length > buf_left) {
3521                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3522                                                   proc->pid, thread->pid);
3523                                 return_error = BR_FAILED_REPLY;
3524                                 return_error_param = -EINVAL;
3525                                 return_error_line = __LINE__;
3526                                 goto err_bad_offset;
3527                         }
3528                         ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3529                                 (const void __user *)(uintptr_t)bp->buffer,
3530                                 bp->length);
3531                         if (ret) {
3532                                 binder_txn_error("%d:%d deferred copy failed\n",
3533                                         thread->pid, proc->pid);
3534                                 return_error = BR_FAILED_REPLY;
3535                                 return_error_param = ret;
3536                                 return_error_line = __LINE__;
3537                                 goto err_translate_failed;
3538                         }
3539                         /* Fixup buffer pointer to target proc address space */
3540                         bp->buffer = t->buffer->user_data + sg_buf_offset;
3541                         sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3542
3543                         num_valid = (buffer_offset - off_start_offset) /
3544                                         sizeof(binder_size_t);
3545                         ret = binder_fixup_parent(&pf_head, t,
3546                                                   thread, bp,
3547                                                   off_start_offset,
3548                                                   num_valid,
3549                                                   last_fixup_obj_off,
3550                                                   last_fixup_min_off);
3551                         if (ret < 0 ||
3552                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3553                                                         t->buffer,
3554                                                         object_offset,
3555                                                         bp, sizeof(*bp))) {
3556                                 binder_txn_error("%d:%d failed to fixup parent\n",
3557                                         thread->pid, proc->pid);
3558                                 return_error = BR_FAILED_REPLY;
3559                                 return_error_param = ret;
3560                                 return_error_line = __LINE__;
3561                                 goto err_translate_failed;
3562                         }
3563                         last_fixup_obj_off = object_offset;
3564                         last_fixup_min_off = 0;
3565                 } break;
3566                 default:
3567                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3568                                 proc->pid, thread->pid, hdr->type);
3569                         return_error = BR_FAILED_REPLY;
3570                         return_error_param = -EINVAL;
3571                         return_error_line = __LINE__;
3572                         goto err_bad_object_type;
3573                 }
3574         }
3575         /* Done processing objects, copy the rest of the buffer */
3576         if (binder_alloc_copy_user_to_buffer(
3577                                 &target_proc->alloc,
3578                                 t->buffer, user_offset,
3579                                 user_buffer + user_offset,
3580                                 tr->data_size - user_offset)) {
3581                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3582                                 proc->pid, thread->pid);
3583                 return_error = BR_FAILED_REPLY;
3584                 return_error_param = -EFAULT;
3585                 return_error_line = __LINE__;
3586                 goto err_copy_data_failed;
3587         }
3588
3589         ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3590                                             &sgc_head, &pf_head);
3591         if (ret) {
3592                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3593                                   proc->pid, thread->pid);
3594                 return_error = BR_FAILED_REPLY;
3595                 return_error_param = ret;
3596                 return_error_line = __LINE__;
3597                 goto err_copy_data_failed;
3598         }
3599         if (t->buffer->oneway_spam_suspect)
3600                 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3601         else
3602                 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3603         t->work.type = BINDER_WORK_TRANSACTION;
3604
3605         if (reply) {
3606                 binder_enqueue_thread_work(thread, tcomplete);
3607                 binder_inner_proc_lock(target_proc);
3608                 if (target_thread->is_dead) {
3609                         return_error = BR_DEAD_REPLY;
3610                         binder_inner_proc_unlock(target_proc);
3611                         goto err_dead_proc_or_thread;
3612                 }
3613                 BUG_ON(t->buffer->async_transaction != 0);
3614                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3615                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3616                 target_proc->outstanding_txns++;
3617                 binder_inner_proc_unlock(target_proc);
3618                 wake_up_interruptible_sync(&target_thread->wait);
3619                 binder_free_transaction(in_reply_to);
3620         } else if (!(t->flags & TF_ONE_WAY)) {
3621                 BUG_ON(t->buffer->async_transaction != 0);
3622                 binder_inner_proc_lock(proc);
3623                 /*
3624                  * Defer the TRANSACTION_COMPLETE, so we don't return to
3625                  * userspace immediately; this allows the target process to
3626                  * immediately start processing this transaction, reducing
3627                  * latency. We will then return the TRANSACTION_COMPLETE when
3628                  * the target replies (or there is an error).
3629                  */
3630                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3631                 t->need_reply = 1;
3632                 t->from_parent = thread->transaction_stack;
3633                 thread->transaction_stack = t;
3634                 binder_inner_proc_unlock(proc);
3635                 return_error = binder_proc_transaction(t,
3636                                 target_proc, target_thread);
3637                 if (return_error) {
3638                         binder_inner_proc_lock(proc);
3639                         binder_pop_transaction_ilocked(thread, t);
3640                         binder_inner_proc_unlock(proc);
3641                         goto err_dead_proc_or_thread;
3642                 }
3643         } else {
3644                 BUG_ON(target_node == NULL);
3645                 BUG_ON(t->buffer->async_transaction != 1);
3646                 return_error = binder_proc_transaction(t, target_proc, NULL);
3647                 /*
3648                  * Let the caller know when async transaction reaches a frozen
3649                  * process and is put in a pending queue, waiting for the target
3650                  * process to be unfrozen.
3651                  */
3652                 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3653                         tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3654                 binder_enqueue_thread_work(thread, tcomplete);
3655                 if (return_error &&
3656                     return_error != BR_TRANSACTION_PENDING_FROZEN)
3657                         goto err_dead_proc_or_thread;
3658         }
3659         if (target_thread)
3660                 binder_thread_dec_tmpref(target_thread);
3661         binder_proc_dec_tmpref(target_proc);
3662         if (target_node)
3663                 binder_dec_node_tmpref(target_node);
3664         /*
3665          * write barrier to synchronize with initialization
3666          * of log entry
3667          */
3668         smp_wmb();
3669         WRITE_ONCE(e->debug_id_done, t_debug_id);
3670         return;
3671
3672 err_dead_proc_or_thread:
3673         binder_txn_error("%d:%d dead process or thread\n",
3674                 thread->pid, proc->pid);
3675         return_error_line = __LINE__;
3676         binder_dequeue_work(proc, tcomplete);
3677 err_translate_failed:
3678 err_bad_object_type:
3679 err_bad_offset:
3680 err_bad_parent:
3681 err_copy_data_failed:
3682         binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3683         binder_free_txn_fixups(t);
3684         trace_binder_transaction_failed_buffer_release(t->buffer);
3685         binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3686                                           buffer_offset, true);
3687         if (target_node)
3688                 binder_dec_node_tmpref(target_node);
3689         target_node = NULL;
3690         t->buffer->transaction = NULL;
3691         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3692 err_binder_alloc_buf_failed:
3693 err_bad_extra_size:
3694         if (secctx)
3695                 security_release_secctx(secctx, secctx_sz);
3696 err_get_secctx_failed:
3697         kfree(tcomplete);
3698         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3699 err_alloc_tcomplete_failed:
3700         if (trace_binder_txn_latency_free_enabled())
3701                 binder_txn_latency_free(t);
3702         kfree(t);
3703         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3704 err_alloc_t_failed:
3705 err_bad_todo_list:
3706 err_bad_call_stack:
3707 err_empty_call_stack:
3708 err_dead_binder:
3709 err_invalid_target_handle:
3710         if (target_node) {
3711                 binder_dec_node(target_node, 1, 0);
3712                 binder_dec_node_tmpref(target_node);
3713         }
3714
3715         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3716                      "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3717                      proc->pid, thread->pid, reply ? "reply" :
3718                      (tr->flags & TF_ONE_WAY ? "async" : "call"),
3719                      target_proc ? target_proc->pid : 0,
3720                      target_thread ? target_thread->pid : 0,
3721                      t_debug_id, return_error, return_error_param,
3722                      (u64)tr->data_size, (u64)tr->offsets_size,
3723                      return_error_line);
3724
3725         if (target_thread)
3726                 binder_thread_dec_tmpref(target_thread);
3727         if (target_proc)
3728                 binder_proc_dec_tmpref(target_proc);
3729
3730         {
3731                 struct binder_transaction_log_entry *fe;
3732
3733                 e->return_error = return_error;
3734                 e->return_error_param = return_error_param;
3735                 e->return_error_line = return_error_line;
3736                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3737                 *fe = *e;
3738                 /*
3739                  * write barrier to synchronize with initialization
3740                  * of log entry
3741                  */
3742                 smp_wmb();
3743                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3744                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3745         }
3746
3747         BUG_ON(thread->return_error.cmd != BR_OK);
3748         if (in_reply_to) {
3749                 binder_set_txn_from_error(in_reply_to, t_debug_id,
3750                                 return_error, return_error_param);
3751                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3752                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3753                 binder_send_failed_reply(in_reply_to, return_error);
3754         } else {
3755                 binder_inner_proc_lock(proc);
3756                 binder_set_extended_error(&thread->ee, t_debug_id,
3757                                 return_error, return_error_param);
3758                 binder_inner_proc_unlock(proc);
3759                 thread->return_error.cmd = return_error;
3760                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3761         }
3762 }
3763
3764 /**
3765  * binder_free_buf() - free the specified buffer
3766  * @proc:       binder proc that owns buffer
3767  * @buffer:     buffer to be freed
3768  * @is_failure: failed to send transaction
3769  *
3770  * If buffer for an async transaction, enqueue the next async
3771  * transaction from the node.
3772  *
3773  * Cleanup buffer and free it.
3774  */
3775 static void
3776 binder_free_buf(struct binder_proc *proc,
3777                 struct binder_thread *thread,
3778                 struct binder_buffer *buffer, bool is_failure)
3779 {
3780         binder_inner_proc_lock(proc);
3781         if (buffer->transaction) {
3782                 buffer->transaction->buffer = NULL;
3783                 buffer->transaction = NULL;
3784         }
3785         binder_inner_proc_unlock(proc);
3786         if (buffer->async_transaction && buffer->target_node) {
3787                 struct binder_node *buf_node;
3788                 struct binder_work *w;
3789
3790                 buf_node = buffer->target_node;
3791                 binder_node_inner_lock(buf_node);
3792                 BUG_ON(!buf_node->has_async_transaction);
3793                 BUG_ON(buf_node->proc != proc);
3794                 w = binder_dequeue_work_head_ilocked(
3795                                 &buf_node->async_todo);
3796                 if (!w) {
3797                         buf_node->has_async_transaction = false;
3798                 } else {
3799                         binder_enqueue_work_ilocked(
3800                                         w, &proc->todo);
3801                         binder_wakeup_proc_ilocked(proc);
3802                 }
3803                 binder_node_inner_unlock(buf_node);
3804         }
3805         trace_binder_transaction_buffer_release(buffer);
3806         binder_release_entire_buffer(proc, thread, buffer, is_failure);
3807         binder_alloc_free_buf(&proc->alloc, buffer);
3808 }
3809
3810 static int binder_thread_write(struct binder_proc *proc,
3811                         struct binder_thread *thread,
3812                         binder_uintptr_t binder_buffer, size_t size,
3813                         binder_size_t *consumed)
3814 {
3815         uint32_t cmd;
3816         struct binder_context *context = proc->context;
3817         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3818         void __user *ptr = buffer + *consumed;
3819         void __user *end = buffer + size;
3820
3821         while (ptr < end && thread->return_error.cmd == BR_OK) {
3822                 int ret;
3823
3824                 if (get_user(cmd, (uint32_t __user *)ptr))
3825                         return -EFAULT;
3826                 ptr += sizeof(uint32_t);
3827                 trace_binder_command(cmd);
3828                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3829                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3830                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3831                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3832                 }
3833                 switch (cmd) {
3834                 case BC_INCREFS:
3835                 case BC_ACQUIRE:
3836                 case BC_RELEASE:
3837                 case BC_DECREFS: {
3838                         uint32_t target;
3839                         const char *debug_string;
3840                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3841                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3842                         struct binder_ref_data rdata;
3843
3844                         if (get_user(target, (uint32_t __user *)ptr))
3845                                 return -EFAULT;
3846
3847                         ptr += sizeof(uint32_t);
3848                         ret = -1;
3849                         if (increment && !target) {
3850                                 struct binder_node *ctx_mgr_node;
3851
3852                                 mutex_lock(&context->context_mgr_node_lock);
3853                                 ctx_mgr_node = context->binder_context_mgr_node;
3854                                 if (ctx_mgr_node) {
3855                                         if (ctx_mgr_node->proc == proc) {
3856                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3857                                                                   proc->pid, thread->pid);
3858                                                 mutex_unlock(&context->context_mgr_node_lock);
3859                                                 return -EINVAL;
3860                                         }
3861                                         ret = binder_inc_ref_for_node(
3862                                                         proc, ctx_mgr_node,
3863                                                         strong, NULL, &rdata);
3864                                 }
3865                                 mutex_unlock(&context->context_mgr_node_lock);
3866                         }
3867                         if (ret)
3868                                 ret = binder_update_ref_for_handle(
3869                                                 proc, target, increment, strong,
3870                                                 &rdata);
3871                         if (!ret && rdata.desc != target) {
3872                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3873                                         proc->pid, thread->pid,
3874                                         target, rdata.desc);
3875                         }
3876                         switch (cmd) {
3877                         case BC_INCREFS:
3878                                 debug_string = "IncRefs";
3879                                 break;
3880                         case BC_ACQUIRE:
3881                                 debug_string = "Acquire";
3882                                 break;
3883                         case BC_RELEASE:
3884                                 debug_string = "Release";
3885                                 break;
3886                         case BC_DECREFS:
3887                         default:
3888                                 debug_string = "DecRefs";
3889                                 break;
3890                         }
3891                         if (ret) {
3892                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3893                                         proc->pid, thread->pid, debug_string,
3894                                         strong, target, ret);
3895                                 break;
3896                         }
3897                         binder_debug(BINDER_DEBUG_USER_REFS,
3898                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3899                                      proc->pid, thread->pid, debug_string,
3900                                      rdata.debug_id, rdata.desc, rdata.strong,
3901                                      rdata.weak);
3902                         break;
3903                 }
3904                 case BC_INCREFS_DONE:
3905                 case BC_ACQUIRE_DONE: {
3906                         binder_uintptr_t node_ptr;
3907                         binder_uintptr_t cookie;
3908                         struct binder_node *node;
3909                         bool free_node;
3910
3911                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3912                                 return -EFAULT;
3913                         ptr += sizeof(binder_uintptr_t);
3914                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3915                                 return -EFAULT;
3916                         ptr += sizeof(binder_uintptr_t);
3917                         node = binder_get_node(proc, node_ptr);
3918                         if (node == NULL) {
3919                                 binder_user_error("%d:%d %s u%016llx no match\n",
3920                                         proc->pid, thread->pid,
3921                                         cmd == BC_INCREFS_DONE ?
3922                                         "BC_INCREFS_DONE" :
3923                                         "BC_ACQUIRE_DONE",
3924                                         (u64)node_ptr);
3925                                 break;
3926                         }
3927                         if (cookie != node->cookie) {
3928                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3929                                         proc->pid, thread->pid,
3930                                         cmd == BC_INCREFS_DONE ?
3931                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3932                                         (u64)node_ptr, node->debug_id,
3933                                         (u64)cookie, (u64)node->cookie);
3934                                 binder_put_node(node);
3935                                 break;
3936                         }
3937                         binder_node_inner_lock(node);
3938                         if (cmd == BC_ACQUIRE_DONE) {
3939                                 if (node->pending_strong_ref == 0) {
3940                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3941                                                 proc->pid, thread->pid,
3942                                                 node->debug_id);
3943                                         binder_node_inner_unlock(node);
3944                                         binder_put_node(node);
3945                                         break;
3946                                 }
3947                                 node->pending_strong_ref = 0;
3948                         } else {
3949                                 if (node->pending_weak_ref == 0) {
3950                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3951                                                 proc->pid, thread->pid,
3952                                                 node->debug_id);
3953                                         binder_node_inner_unlock(node);
3954                                         binder_put_node(node);
3955                                         break;
3956                                 }
3957                                 node->pending_weak_ref = 0;
3958                         }
3959                         free_node = binder_dec_node_nilocked(node,
3960                                         cmd == BC_ACQUIRE_DONE, 0);
3961                         WARN_ON(free_node);
3962                         binder_debug(BINDER_DEBUG_USER_REFS,
3963                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3964                                      proc->pid, thread->pid,
3965                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3966                                      node->debug_id, node->local_strong_refs,
3967                                      node->local_weak_refs, node->tmp_refs);
3968                         binder_node_inner_unlock(node);
3969                         binder_put_node(node);
3970                         break;
3971                 }
3972                 case BC_ATTEMPT_ACQUIRE:
3973                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3974                         return -EINVAL;
3975                 case BC_ACQUIRE_RESULT:
3976                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3977                         return -EINVAL;
3978
3979                 case BC_FREE_BUFFER: {
3980                         binder_uintptr_t data_ptr;
3981                         struct binder_buffer *buffer;
3982
3983                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3984                                 return -EFAULT;
3985                         ptr += sizeof(binder_uintptr_t);
3986
3987                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3988                                                               data_ptr);
3989                         if (IS_ERR_OR_NULL(buffer)) {
3990                                 if (PTR_ERR(buffer) == -EPERM) {
3991                                         binder_user_error(
3992                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3993                                                 proc->pid, thread->pid,
3994                                                 (u64)data_ptr);
3995                                 } else {
3996                                         binder_user_error(
3997                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3998                                                 proc->pid, thread->pid,
3999                                                 (u64)data_ptr);
4000                                 }
4001                                 break;
4002                         }
4003                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
4004                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4005                                      proc->pid, thread->pid, (u64)data_ptr,
4006                                      buffer->debug_id,
4007                                      buffer->transaction ? "active" : "finished");
4008                         binder_free_buf(proc, thread, buffer, false);
4009                         break;
4010                 }
4011
4012                 case BC_TRANSACTION_SG:
4013                 case BC_REPLY_SG: {
4014                         struct binder_transaction_data_sg tr;
4015
4016                         if (copy_from_user(&tr, ptr, sizeof(tr)))
4017                                 return -EFAULT;
4018                         ptr += sizeof(tr);
4019                         binder_transaction(proc, thread, &tr.transaction_data,
4020                                            cmd == BC_REPLY_SG, tr.buffers_size);
4021                         break;
4022                 }
4023                 case BC_TRANSACTION:
4024                 case BC_REPLY: {
4025                         struct binder_transaction_data tr;
4026
4027                         if (copy_from_user(&tr, ptr, sizeof(tr)))
4028                                 return -EFAULT;
4029                         ptr += sizeof(tr);
4030                         binder_transaction(proc, thread, &tr,
4031                                            cmd == BC_REPLY, 0);
4032                         break;
4033                 }
4034
4035                 case BC_REGISTER_LOOPER:
4036                         binder_debug(BINDER_DEBUG_THREADS,
4037                                      "%d:%d BC_REGISTER_LOOPER\n",
4038                                      proc->pid, thread->pid);
4039                         binder_inner_proc_lock(proc);
4040                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4041                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4042                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4043                                         proc->pid, thread->pid);
4044                         } else if (proc->requested_threads == 0) {
4045                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4046                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4047                                         proc->pid, thread->pid);
4048                         } else {
4049                                 proc->requested_threads--;
4050                                 proc->requested_threads_started++;
4051                         }
4052                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4053                         binder_inner_proc_unlock(proc);
4054                         break;
4055                 case BC_ENTER_LOOPER:
4056                         binder_debug(BINDER_DEBUG_THREADS,
4057                                      "%d:%d BC_ENTER_LOOPER\n",
4058                                      proc->pid, thread->pid);
4059                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4060                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4061                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4062                                         proc->pid, thread->pid);
4063                         }
4064                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4065                         break;
4066                 case BC_EXIT_LOOPER:
4067                         binder_debug(BINDER_DEBUG_THREADS,
4068                                      "%d:%d BC_EXIT_LOOPER\n",
4069                                      proc->pid, thread->pid);
4070                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
4071                         break;
4072
4073                 case BC_REQUEST_DEATH_NOTIFICATION:
4074                 case BC_CLEAR_DEATH_NOTIFICATION: {
4075                         uint32_t target;
4076                         binder_uintptr_t cookie;
4077                         struct binder_ref *ref;
4078                         struct binder_ref_death *death = NULL;
4079
4080                         if (get_user(target, (uint32_t __user *)ptr))
4081                                 return -EFAULT;
4082                         ptr += sizeof(uint32_t);
4083                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4084                                 return -EFAULT;
4085                         ptr += sizeof(binder_uintptr_t);
4086                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4087                                 /*
4088                                  * Allocate memory for death notification
4089                                  * before taking lock
4090                                  */
4091                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
4092                                 if (death == NULL) {
4093                                         WARN_ON(thread->return_error.cmd !=
4094                                                 BR_OK);
4095                                         thread->return_error.cmd = BR_ERROR;
4096                                         binder_enqueue_thread_work(
4097                                                 thread,
4098                                                 &thread->return_error.work);
4099                                         binder_debug(
4100                                                 BINDER_DEBUG_FAILED_TRANSACTION,
4101                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4102                                                 proc->pid, thread->pid);
4103                                         break;
4104                                 }
4105                         }
4106                         binder_proc_lock(proc);
4107                         ref = binder_get_ref_olocked(proc, target, false);
4108                         if (ref == NULL) {
4109                                 binder_user_error("%d:%d %s invalid ref %d\n",
4110                                         proc->pid, thread->pid,
4111                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4112                                         "BC_REQUEST_DEATH_NOTIFICATION" :
4113                                         "BC_CLEAR_DEATH_NOTIFICATION",
4114                                         target);
4115                                 binder_proc_unlock(proc);
4116                                 kfree(death);
4117                                 break;
4118                         }
4119
4120                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4121                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4122                                      proc->pid, thread->pid,
4123                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4124                                      "BC_REQUEST_DEATH_NOTIFICATION" :
4125                                      "BC_CLEAR_DEATH_NOTIFICATION",
4126                                      (u64)cookie, ref->data.debug_id,
4127                                      ref->data.desc, ref->data.strong,
4128                                      ref->data.weak, ref->node->debug_id);
4129
4130                         binder_node_lock(ref->node);
4131                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4132                                 if (ref->death) {
4133                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4134                                                 proc->pid, thread->pid);
4135                                         binder_node_unlock(ref->node);
4136                                         binder_proc_unlock(proc);
4137                                         kfree(death);
4138                                         break;
4139                                 }
4140                                 binder_stats_created(BINDER_STAT_DEATH);
4141                                 INIT_LIST_HEAD(&death->work.entry);
4142                                 death->cookie = cookie;
4143                                 ref->death = death;
4144                                 if (ref->node->proc == NULL) {
4145                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4146
4147                                         binder_inner_proc_lock(proc);
4148                                         binder_enqueue_work_ilocked(
4149                                                 &ref->death->work, &proc->todo);
4150                                         binder_wakeup_proc_ilocked(proc);
4151                                         binder_inner_proc_unlock(proc);
4152                                 }
4153                         } else {
4154                                 if (ref->death == NULL) {
4155                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4156                                                 proc->pid, thread->pid);
4157                                         binder_node_unlock(ref->node);
4158                                         binder_proc_unlock(proc);
4159                                         break;
4160                                 }
4161                                 death = ref->death;
4162                                 if (death->cookie != cookie) {
4163                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4164                                                 proc->pid, thread->pid,
4165                                                 (u64)death->cookie,
4166                                                 (u64)cookie);
4167                                         binder_node_unlock(ref->node);
4168                                         binder_proc_unlock(proc);
4169                                         break;
4170                                 }
4171                                 ref->death = NULL;
4172                                 binder_inner_proc_lock(proc);
4173                                 if (list_empty(&death->work.entry)) {
4174                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4175                                         if (thread->looper &
4176                                             (BINDER_LOOPER_STATE_REGISTERED |
4177                                              BINDER_LOOPER_STATE_ENTERED))
4178                                                 binder_enqueue_thread_work_ilocked(
4179                                                                 thread,
4180                                                                 &death->work);
4181                                         else {
4182                                                 binder_enqueue_work_ilocked(
4183                                                                 &death->work,
4184                                                                 &proc->todo);
4185                                                 binder_wakeup_proc_ilocked(
4186                                                                 proc);
4187                                         }
4188                                 } else {
4189                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4190                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4191                                 }
4192                                 binder_inner_proc_unlock(proc);
4193                         }
4194                         binder_node_unlock(ref->node);
4195                         binder_proc_unlock(proc);
4196                 } break;
4197                 case BC_DEAD_BINDER_DONE: {
4198                         struct binder_work *w;
4199                         binder_uintptr_t cookie;
4200                         struct binder_ref_death *death = NULL;
4201
4202                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4203                                 return -EFAULT;
4204
4205                         ptr += sizeof(cookie);
4206                         binder_inner_proc_lock(proc);
4207                         list_for_each_entry(w, &proc->delivered_death,
4208                                             entry) {
4209                                 struct binder_ref_death *tmp_death =
4210                                         container_of(w,
4211                                                      struct binder_ref_death,
4212                                                      work);
4213
4214                                 if (tmp_death->cookie == cookie) {
4215                                         death = tmp_death;
4216                                         break;
4217                                 }
4218                         }
4219                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4220                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4221                                      proc->pid, thread->pid, (u64)cookie,
4222                                      death);
4223                         if (death == NULL) {
4224                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4225                                         proc->pid, thread->pid, (u64)cookie);
4226                                 binder_inner_proc_unlock(proc);
4227                                 break;
4228                         }
4229                         binder_dequeue_work_ilocked(&death->work);
4230                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4231                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4232                                 if (thread->looper &
4233                                         (BINDER_LOOPER_STATE_REGISTERED |
4234                                          BINDER_LOOPER_STATE_ENTERED))
4235                                         binder_enqueue_thread_work_ilocked(
4236                                                 thread, &death->work);
4237                                 else {
4238                                         binder_enqueue_work_ilocked(
4239                                                         &death->work,
4240                                                         &proc->todo);
4241                                         binder_wakeup_proc_ilocked(proc);
4242                                 }
4243                         }
4244                         binder_inner_proc_unlock(proc);
4245                 } break;
4246
4247                 default:
4248                         pr_err("%d:%d unknown command %u\n",
4249                                proc->pid, thread->pid, cmd);
4250                         return -EINVAL;
4251                 }
4252                 *consumed = ptr - buffer;
4253         }
4254         return 0;
4255 }
4256
4257 static void binder_stat_br(struct binder_proc *proc,
4258                            struct binder_thread *thread, uint32_t cmd)
4259 {
4260         trace_binder_return(cmd);
4261         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4262                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4263                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4264                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4265         }
4266 }
4267
4268 static int binder_put_node_cmd(struct binder_proc *proc,
4269                                struct binder_thread *thread,
4270                                void __user **ptrp,
4271                                binder_uintptr_t node_ptr,
4272                                binder_uintptr_t node_cookie,
4273                                int node_debug_id,
4274                                uint32_t cmd, const char *cmd_name)
4275 {
4276         void __user *ptr = *ptrp;
4277
4278         if (put_user(cmd, (uint32_t __user *)ptr))
4279                 return -EFAULT;
4280         ptr += sizeof(uint32_t);
4281
4282         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4283                 return -EFAULT;
4284         ptr += sizeof(binder_uintptr_t);
4285
4286         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4287                 return -EFAULT;
4288         ptr += sizeof(binder_uintptr_t);
4289
4290         binder_stat_br(proc, thread, cmd);
4291         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4292                      proc->pid, thread->pid, cmd_name, node_debug_id,
4293                      (u64)node_ptr, (u64)node_cookie);
4294
4295         *ptrp = ptr;
4296         return 0;
4297 }
4298
4299 static int binder_wait_for_work(struct binder_thread *thread,
4300                                 bool do_proc_work)
4301 {
4302         DEFINE_WAIT(wait);
4303         struct binder_proc *proc = thread->proc;
4304         int ret = 0;
4305
4306         binder_inner_proc_lock(proc);
4307         for (;;) {
4308                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4309                 if (binder_has_work_ilocked(thread, do_proc_work))
4310                         break;
4311                 if (do_proc_work)
4312                         list_add(&thread->waiting_thread_node,
4313                                  &proc->waiting_threads);
4314                 binder_inner_proc_unlock(proc);
4315                 schedule();
4316                 binder_inner_proc_lock(proc);
4317                 list_del_init(&thread->waiting_thread_node);
4318                 if (signal_pending(current)) {
4319                         ret = -EINTR;
4320                         break;
4321                 }
4322         }
4323         finish_wait(&thread->wait, &wait);
4324         binder_inner_proc_unlock(proc);
4325
4326         return ret;
4327 }
4328
4329 /**
4330  * binder_apply_fd_fixups() - finish fd translation
4331  * @proc:         binder_proc associated @t->buffer
4332  * @t:  binder transaction with list of fd fixups
4333  *
4334  * Now that we are in the context of the transaction target
4335  * process, we can allocate and install fds. Process the
4336  * list of fds to translate and fixup the buffer with the
4337  * new fds first and only then install the files.
4338  *
4339  * If we fail to allocate an fd, skip the install and release
4340  * any fds that have already been allocated.
4341  */
4342 static int binder_apply_fd_fixups(struct binder_proc *proc,
4343                                   struct binder_transaction *t)
4344 {
4345         struct binder_txn_fd_fixup *fixup, *tmp;
4346         int ret = 0;
4347
4348         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4349                 int fd = get_unused_fd_flags(O_CLOEXEC);
4350
4351                 if (fd < 0) {
4352                         binder_debug(BINDER_DEBUG_TRANSACTION,
4353                                      "failed fd fixup txn %d fd %d\n",
4354                                      t->debug_id, fd);
4355                         ret = -ENOMEM;
4356                         goto err;
4357                 }
4358                 binder_debug(BINDER_DEBUG_TRANSACTION,
4359                              "fd fixup txn %d fd %d\n",
4360                              t->debug_id, fd);
4361                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4362                 fixup->target_fd = fd;
4363                 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4364                                                 fixup->offset, &fd,
4365                                                 sizeof(u32))) {
4366                         ret = -EINVAL;
4367                         goto err;
4368                 }
4369         }
4370         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4371                 fd_install(fixup->target_fd, fixup->file);
4372                 list_del(&fixup->fixup_entry);
4373                 kfree(fixup);
4374         }
4375
4376         return ret;
4377
4378 err:
4379         binder_free_txn_fixups(t);
4380         return ret;
4381 }
4382
4383 static int binder_thread_read(struct binder_proc *proc,
4384                               struct binder_thread *thread,
4385                               binder_uintptr_t binder_buffer, size_t size,
4386                               binder_size_t *consumed, int non_block)
4387 {
4388         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4389         void __user *ptr = buffer + *consumed;
4390         void __user *end = buffer + size;
4391
4392         int ret = 0;
4393         int wait_for_proc_work;
4394
4395         if (*consumed == 0) {
4396                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4397                         return -EFAULT;
4398                 ptr += sizeof(uint32_t);
4399         }
4400
4401 retry:
4402         binder_inner_proc_lock(proc);
4403         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4404         binder_inner_proc_unlock(proc);
4405
4406         thread->looper |= BINDER_LOOPER_STATE_WAITING;
4407
4408         trace_binder_wait_for_work(wait_for_proc_work,
4409                                    !!thread->transaction_stack,
4410                                    !binder_worklist_empty(proc, &thread->todo));
4411         if (wait_for_proc_work) {
4412                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4413                                         BINDER_LOOPER_STATE_ENTERED))) {
4414                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4415                                 proc->pid, thread->pid, thread->looper);
4416                         wait_event_interruptible(binder_user_error_wait,
4417                                                  binder_stop_on_user_error < 2);
4418                 }
4419                 binder_set_nice(proc->default_priority);
4420         }
4421
4422         if (non_block) {
4423                 if (!binder_has_work(thread, wait_for_proc_work))
4424                         ret = -EAGAIN;
4425         } else {
4426                 ret = binder_wait_for_work(thread, wait_for_proc_work);
4427         }
4428
4429         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4430
4431         if (ret)
4432                 return ret;
4433
4434         while (1) {
4435                 uint32_t cmd;
4436                 struct binder_transaction_data_secctx tr;
4437                 struct binder_transaction_data *trd = &tr.transaction_data;
4438                 struct binder_work *w = NULL;
4439                 struct list_head *list = NULL;
4440                 struct binder_transaction *t = NULL;
4441                 struct binder_thread *t_from;
4442                 size_t trsize = sizeof(*trd);
4443
4444                 binder_inner_proc_lock(proc);
4445                 if (!binder_worklist_empty_ilocked(&thread->todo))
4446                         list = &thread->todo;
4447                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4448                            wait_for_proc_work)
4449                         list = &proc->todo;
4450                 else {
4451                         binder_inner_proc_unlock(proc);
4452
4453                         /* no data added */
4454                         if (ptr - buffer == 4 && !thread->looper_need_return)
4455                                 goto retry;
4456                         break;
4457                 }
4458
4459                 if (end - ptr < sizeof(tr) + 4) {
4460                         binder_inner_proc_unlock(proc);
4461                         break;
4462                 }
4463                 w = binder_dequeue_work_head_ilocked(list);
4464                 if (binder_worklist_empty_ilocked(&thread->todo))
4465                         thread->process_todo = false;
4466
4467                 switch (w->type) {
4468                 case BINDER_WORK_TRANSACTION: {
4469                         binder_inner_proc_unlock(proc);
4470                         t = container_of(w, struct binder_transaction, work);
4471                 } break;
4472                 case BINDER_WORK_RETURN_ERROR: {
4473                         struct binder_error *e = container_of(
4474                                         w, struct binder_error, work);
4475
4476                         WARN_ON(e->cmd == BR_OK);
4477                         binder_inner_proc_unlock(proc);
4478                         if (put_user(e->cmd, (uint32_t __user *)ptr))
4479                                 return -EFAULT;
4480                         cmd = e->cmd;
4481                         e->cmd = BR_OK;
4482                         ptr += sizeof(uint32_t);
4483
4484                         binder_stat_br(proc, thread, cmd);
4485                 } break;
4486                 case BINDER_WORK_TRANSACTION_COMPLETE:
4487                 case BINDER_WORK_TRANSACTION_PENDING:
4488                 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4489                         if (proc->oneway_spam_detection_enabled &&
4490                                    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4491                                 cmd = BR_ONEWAY_SPAM_SUSPECT;
4492                         else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4493                                 cmd = BR_TRANSACTION_PENDING_FROZEN;
4494                         else
4495                                 cmd = BR_TRANSACTION_COMPLETE;
4496                         binder_inner_proc_unlock(proc);
4497                         kfree(w);
4498                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4499                         if (put_user(cmd, (uint32_t __user *)ptr))
4500                                 return -EFAULT;
4501                         ptr += sizeof(uint32_t);
4502
4503                         binder_stat_br(proc, thread, cmd);
4504                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4505                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
4506                                      proc->pid, thread->pid);
4507                 } break;
4508                 case BINDER_WORK_NODE: {
4509                         struct binder_node *node = container_of(w, struct binder_node, work);
4510                         int strong, weak;
4511                         binder_uintptr_t node_ptr = node->ptr;
4512                         binder_uintptr_t node_cookie = node->cookie;
4513                         int node_debug_id = node->debug_id;
4514                         int has_weak_ref;
4515                         int has_strong_ref;
4516                         void __user *orig_ptr = ptr;
4517
4518                         BUG_ON(proc != node->proc);
4519                         strong = node->internal_strong_refs ||
4520                                         node->local_strong_refs;
4521                         weak = !hlist_empty(&node->refs) ||
4522                                         node->local_weak_refs ||
4523                                         node->tmp_refs || strong;
4524                         has_strong_ref = node->has_strong_ref;
4525                         has_weak_ref = node->has_weak_ref;
4526
4527                         if (weak && !has_weak_ref) {
4528                                 node->has_weak_ref = 1;
4529                                 node->pending_weak_ref = 1;
4530                                 node->local_weak_refs++;
4531                         }
4532                         if (strong && !has_strong_ref) {
4533                                 node->has_strong_ref = 1;
4534                                 node->pending_strong_ref = 1;
4535                                 node->local_strong_refs++;
4536                         }
4537                         if (!strong && has_strong_ref)
4538                                 node->has_strong_ref = 0;
4539                         if (!weak && has_weak_ref)
4540                                 node->has_weak_ref = 0;
4541                         if (!weak && !strong) {
4542                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4543                                              "%d:%d node %d u%016llx c%016llx deleted\n",
4544                                              proc->pid, thread->pid,
4545                                              node_debug_id,
4546                                              (u64)node_ptr,
4547                                              (u64)node_cookie);
4548                                 rb_erase(&node->rb_node, &proc->nodes);
4549                                 binder_inner_proc_unlock(proc);
4550                                 binder_node_lock(node);
4551                                 /*
4552                                  * Acquire the node lock before freeing the
4553                                  * node to serialize with other threads that
4554                                  * may have been holding the node lock while
4555                                  * decrementing this node (avoids race where
4556                                  * this thread frees while the other thread
4557                                  * is unlocking the node after the final
4558                                  * decrement)
4559                                  */
4560                                 binder_node_unlock(node);
4561                                 binder_free_node(node);
4562                         } else
4563                                 binder_inner_proc_unlock(proc);
4564
4565                         if (weak && !has_weak_ref)
4566                                 ret = binder_put_node_cmd(
4567                                                 proc, thread, &ptr, node_ptr,
4568                                                 node_cookie, node_debug_id,
4569                                                 BR_INCREFS, "BR_INCREFS");
4570                         if (!ret && strong && !has_strong_ref)
4571                                 ret = binder_put_node_cmd(
4572                                                 proc, thread, &ptr, node_ptr,
4573                                                 node_cookie, node_debug_id,
4574                                                 BR_ACQUIRE, "BR_ACQUIRE");
4575                         if (!ret && !strong && has_strong_ref)
4576                                 ret = binder_put_node_cmd(
4577                                                 proc, thread, &ptr, node_ptr,
4578                                                 node_cookie, node_debug_id,
4579                                                 BR_RELEASE, "BR_RELEASE");
4580                         if (!ret && !weak && has_weak_ref)
4581                                 ret = binder_put_node_cmd(
4582                                                 proc, thread, &ptr, node_ptr,
4583                                                 node_cookie, node_debug_id,
4584                                                 BR_DECREFS, "BR_DECREFS");
4585                         if (orig_ptr == ptr)
4586                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4587                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
4588                                              proc->pid, thread->pid,
4589                                              node_debug_id,
4590                                              (u64)node_ptr,
4591                                              (u64)node_cookie);
4592                         if (ret)
4593                                 return ret;
4594                 } break;
4595                 case BINDER_WORK_DEAD_BINDER:
4596                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4597                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4598                         struct binder_ref_death *death;
4599                         uint32_t cmd;
4600                         binder_uintptr_t cookie;
4601
4602                         death = container_of(w, struct binder_ref_death, work);
4603                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4604                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4605                         else
4606                                 cmd = BR_DEAD_BINDER;
4607                         cookie = death->cookie;
4608
4609                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4610                                      "%d:%d %s %016llx\n",
4611                                       proc->pid, thread->pid,
4612                                       cmd == BR_DEAD_BINDER ?
4613                                       "BR_DEAD_BINDER" :
4614                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4615                                       (u64)cookie);
4616                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4617                                 binder_inner_proc_unlock(proc);
4618                                 kfree(death);
4619                                 binder_stats_deleted(BINDER_STAT_DEATH);
4620                         } else {
4621                                 binder_enqueue_work_ilocked(
4622                                                 w, &proc->delivered_death);
4623                                 binder_inner_proc_unlock(proc);
4624                         }
4625                         if (put_user(cmd, (uint32_t __user *)ptr))
4626                                 return -EFAULT;
4627                         ptr += sizeof(uint32_t);
4628                         if (put_user(cookie,
4629                                      (binder_uintptr_t __user *)ptr))
4630                                 return -EFAULT;
4631                         ptr += sizeof(binder_uintptr_t);
4632                         binder_stat_br(proc, thread, cmd);
4633                         if (cmd == BR_DEAD_BINDER)
4634                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4635                 } break;
4636                 default:
4637                         binder_inner_proc_unlock(proc);
4638                         pr_err("%d:%d: bad work type %d\n",
4639                                proc->pid, thread->pid, w->type);
4640                         break;
4641                 }
4642
4643                 if (!t)
4644                         continue;
4645
4646                 BUG_ON(t->buffer == NULL);
4647                 if (t->buffer->target_node) {
4648                         struct binder_node *target_node = t->buffer->target_node;
4649
4650                         trd->target.ptr = target_node->ptr;
4651                         trd->cookie =  target_node->cookie;
4652                         t->saved_priority = task_nice(current);
4653                         if (t->priority < target_node->min_priority &&
4654                             !(t->flags & TF_ONE_WAY))
4655                                 binder_set_nice(t->priority);
4656                         else if (!(t->flags & TF_ONE_WAY) ||
4657                                  t->saved_priority > target_node->min_priority)
4658                                 binder_set_nice(target_node->min_priority);
4659                         cmd = BR_TRANSACTION;
4660                 } else {
4661                         trd->target.ptr = 0;
4662                         trd->cookie = 0;
4663                         cmd = BR_REPLY;
4664                 }
4665                 trd->code = t->code;
4666                 trd->flags = t->flags;
4667                 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4668
4669                 t_from = binder_get_txn_from(t);
4670                 if (t_from) {
4671                         struct task_struct *sender = t_from->proc->tsk;
4672
4673                         trd->sender_pid =
4674                                 task_tgid_nr_ns(sender,
4675                                                 task_active_pid_ns(current));
4676                 } else {
4677                         trd->sender_pid = 0;
4678                 }
4679
4680                 ret = binder_apply_fd_fixups(proc, t);
4681                 if (ret) {
4682                         struct binder_buffer *buffer = t->buffer;
4683                         bool oneway = !!(t->flags & TF_ONE_WAY);
4684                         int tid = t->debug_id;
4685
4686                         if (t_from)
4687                                 binder_thread_dec_tmpref(t_from);
4688                         buffer->transaction = NULL;
4689                         binder_cleanup_transaction(t, "fd fixups failed",
4690                                                    BR_FAILED_REPLY);
4691                         binder_free_buf(proc, thread, buffer, true);
4692                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4693                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4694                                      proc->pid, thread->pid,
4695                                      oneway ? "async " :
4696                                         (cmd == BR_REPLY ? "reply " : ""),
4697                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4698                         if (cmd == BR_REPLY) {
4699                                 cmd = BR_FAILED_REPLY;
4700                                 if (put_user(cmd, (uint32_t __user *)ptr))
4701                                         return -EFAULT;
4702                                 ptr += sizeof(uint32_t);
4703                                 binder_stat_br(proc, thread, cmd);
4704                                 break;
4705                         }
4706                         continue;
4707                 }
4708                 trd->data_size = t->buffer->data_size;
4709                 trd->offsets_size = t->buffer->offsets_size;
4710                 trd->data.ptr.buffer = t->buffer->user_data;
4711                 trd->data.ptr.offsets = trd->data.ptr.buffer +
4712                                         ALIGN(t->buffer->data_size,
4713                                             sizeof(void *));
4714
4715                 tr.secctx = t->security_ctx;
4716                 if (t->security_ctx) {
4717                         cmd = BR_TRANSACTION_SEC_CTX;
4718                         trsize = sizeof(tr);
4719                 }
4720                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4721                         if (t_from)
4722                                 binder_thread_dec_tmpref(t_from);
4723
4724                         binder_cleanup_transaction(t, "put_user failed",
4725                                                    BR_FAILED_REPLY);
4726
4727                         return -EFAULT;
4728                 }
4729                 ptr += sizeof(uint32_t);
4730                 if (copy_to_user(ptr, &tr, trsize)) {
4731                         if (t_from)
4732                                 binder_thread_dec_tmpref(t_from);
4733
4734                         binder_cleanup_transaction(t, "copy_to_user failed",
4735                                                    BR_FAILED_REPLY);
4736
4737                         return -EFAULT;
4738                 }
4739                 ptr += trsize;
4740
4741                 trace_binder_transaction_received(t);
4742                 binder_stat_br(proc, thread, cmd);
4743                 binder_debug(BINDER_DEBUG_TRANSACTION,
4744                              "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4745                              proc->pid, thread->pid,
4746                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4747                                 (cmd == BR_TRANSACTION_SEC_CTX) ?
4748                                      "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4749                              t->debug_id, t_from ? t_from->proc->pid : 0,
4750                              t_from ? t_from->pid : 0, cmd,
4751                              t->buffer->data_size, t->buffer->offsets_size,
4752                              (u64)trd->data.ptr.buffer,
4753                              (u64)trd->data.ptr.offsets);
4754
4755                 if (t_from)
4756                         binder_thread_dec_tmpref(t_from);
4757                 t->buffer->allow_user_free = 1;
4758                 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4759                         binder_inner_proc_lock(thread->proc);
4760                         t->to_parent = thread->transaction_stack;
4761                         t->to_thread = thread;
4762                         thread->transaction_stack = t;
4763                         binder_inner_proc_unlock(thread->proc);
4764                 } else {
4765                         binder_free_transaction(t);
4766                 }
4767                 break;
4768         }
4769
4770 done:
4771
4772         *consumed = ptr - buffer;
4773         binder_inner_proc_lock(proc);
4774         if (proc->requested_threads == 0 &&
4775             list_empty(&thread->proc->waiting_threads) &&
4776             proc->requested_threads_started < proc->max_threads &&
4777             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4778              BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4779              /*spawn a new thread if we leave this out */) {
4780                 proc->requested_threads++;
4781                 binder_inner_proc_unlock(proc);
4782                 binder_debug(BINDER_DEBUG_THREADS,
4783                              "%d:%d BR_SPAWN_LOOPER\n",
4784                              proc->pid, thread->pid);
4785                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4786                         return -EFAULT;
4787                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4788         } else
4789                 binder_inner_proc_unlock(proc);
4790         return 0;
4791 }
4792
4793 static void binder_release_work(struct binder_proc *proc,
4794                                 struct list_head *list)
4795 {
4796         struct binder_work *w;
4797         enum binder_work_type wtype;
4798
4799         while (1) {
4800                 binder_inner_proc_lock(proc);
4801                 w = binder_dequeue_work_head_ilocked(list);
4802                 wtype = w ? w->type : 0;
4803                 binder_inner_proc_unlock(proc);
4804                 if (!w)
4805                         return;
4806
4807                 switch (wtype) {
4808                 case BINDER_WORK_TRANSACTION: {
4809                         struct binder_transaction *t;
4810
4811                         t = container_of(w, struct binder_transaction, work);
4812
4813                         binder_cleanup_transaction(t, "process died.",
4814                                                    BR_DEAD_REPLY);
4815                 } break;
4816                 case BINDER_WORK_RETURN_ERROR: {
4817                         struct binder_error *e = container_of(
4818                                         w, struct binder_error, work);
4819
4820                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4821                                 "undelivered TRANSACTION_ERROR: %u\n",
4822                                 e->cmd);
4823                 } break;
4824                 case BINDER_WORK_TRANSACTION_PENDING:
4825                 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4826                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4827                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4828                                 "undelivered TRANSACTION_COMPLETE\n");
4829                         kfree(w);
4830                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4831                 } break;
4832                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4833                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4834                         struct binder_ref_death *death;
4835
4836                         death = container_of(w, struct binder_ref_death, work);
4837                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4838                                 "undelivered death notification, %016llx\n",
4839                                 (u64)death->cookie);
4840                         kfree(death);
4841                         binder_stats_deleted(BINDER_STAT_DEATH);
4842                 } break;
4843                 case BINDER_WORK_NODE:
4844                         break;
4845                 default:
4846                         pr_err("unexpected work type, %d, not freed\n",
4847                                wtype);
4848                         break;
4849                 }
4850         }
4851
4852 }
4853
4854 static struct binder_thread *binder_get_thread_ilocked(
4855                 struct binder_proc *proc, struct binder_thread *new_thread)
4856 {
4857         struct binder_thread *thread = NULL;
4858         struct rb_node *parent = NULL;
4859         struct rb_node **p = &proc->threads.rb_node;
4860
4861         while (*p) {
4862                 parent = *p;
4863                 thread = rb_entry(parent, struct binder_thread, rb_node);
4864
4865                 if (current->pid < thread->pid)
4866                         p = &(*p)->rb_left;
4867                 else if (current->pid > thread->pid)
4868                         p = &(*p)->rb_right;
4869                 else
4870                         return thread;
4871         }
4872         if (!new_thread)
4873                 return NULL;
4874         thread = new_thread;
4875         binder_stats_created(BINDER_STAT_THREAD);
4876         thread->proc = proc;
4877         thread->pid = current->pid;
4878         atomic_set(&thread->tmp_ref, 0);
4879         init_waitqueue_head(&thread->wait);
4880         INIT_LIST_HEAD(&thread->todo);
4881         rb_link_node(&thread->rb_node, parent, p);
4882         rb_insert_color(&thread->rb_node, &proc->threads);
4883         thread->looper_need_return = true;
4884         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4885         thread->return_error.cmd = BR_OK;
4886         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4887         thread->reply_error.cmd = BR_OK;
4888         thread->ee.command = BR_OK;
4889         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4890         return thread;
4891 }
4892
4893 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4894 {
4895         struct binder_thread *thread;
4896         struct binder_thread *new_thread;
4897
4898         binder_inner_proc_lock(proc);
4899         thread = binder_get_thread_ilocked(proc, NULL);
4900         binder_inner_proc_unlock(proc);
4901         if (!thread) {
4902                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4903                 if (new_thread == NULL)
4904                         return NULL;
4905                 binder_inner_proc_lock(proc);
4906                 thread = binder_get_thread_ilocked(proc, new_thread);
4907                 binder_inner_proc_unlock(proc);
4908                 if (thread != new_thread)
4909                         kfree(new_thread);
4910         }
4911         return thread;
4912 }
4913
4914 static void binder_free_proc(struct binder_proc *proc)
4915 {
4916         struct binder_device *device;
4917
4918         BUG_ON(!list_empty(&proc->todo));
4919         BUG_ON(!list_empty(&proc->delivered_death));
4920         if (proc->outstanding_txns)
4921                 pr_warn("%s: Unexpected outstanding_txns %d\n",
4922                         __func__, proc->outstanding_txns);
4923         device = container_of(proc->context, struct binder_device, context);
4924         if (refcount_dec_and_test(&device->ref)) {
4925                 kfree(proc->context->name);
4926                 kfree(device);
4927         }
4928         binder_alloc_deferred_release(&proc->alloc);
4929         put_task_struct(proc->tsk);
4930         put_cred(proc->cred);
4931         binder_stats_deleted(BINDER_STAT_PROC);
4932         kfree(proc);
4933 }
4934
4935 static void binder_free_thread(struct binder_thread *thread)
4936 {
4937         BUG_ON(!list_empty(&thread->todo));
4938         binder_stats_deleted(BINDER_STAT_THREAD);
4939         binder_proc_dec_tmpref(thread->proc);
4940         kfree(thread);
4941 }
4942
4943 static int binder_thread_release(struct binder_proc *proc,
4944                                  struct binder_thread *thread)
4945 {
4946         struct binder_transaction *t;
4947         struct binder_transaction *send_reply = NULL;
4948         int active_transactions = 0;
4949         struct binder_transaction *last_t = NULL;
4950
4951         binder_inner_proc_lock(thread->proc);
4952         /*
4953          * take a ref on the proc so it survives
4954          * after we remove this thread from proc->threads.
4955          * The corresponding dec is when we actually
4956          * free the thread in binder_free_thread()
4957          */
4958         proc->tmp_ref++;
4959         /*
4960          * take a ref on this thread to ensure it
4961          * survives while we are releasing it
4962          */
4963         atomic_inc(&thread->tmp_ref);
4964         rb_erase(&thread->rb_node, &proc->threads);
4965         t = thread->transaction_stack;
4966         if (t) {
4967                 spin_lock(&t->lock);
4968                 if (t->to_thread == thread)
4969                         send_reply = t;
4970         } else {
4971                 __acquire(&t->lock);
4972         }
4973         thread->is_dead = true;
4974
4975         while (t) {
4976                 last_t = t;
4977                 active_transactions++;
4978                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4979                              "release %d:%d transaction %d %s, still active\n",
4980                               proc->pid, thread->pid,
4981                              t->debug_id,
4982                              (t->to_thread == thread) ? "in" : "out");
4983
4984                 if (t->to_thread == thread) {
4985                         thread->proc->outstanding_txns--;
4986                         t->to_proc = NULL;
4987                         t->to_thread = NULL;
4988                         if (t->buffer) {
4989                                 t->buffer->transaction = NULL;
4990                                 t->buffer = NULL;
4991                         }
4992                         t = t->to_parent;
4993                 } else if (t->from == thread) {
4994                         t->from = NULL;
4995                         t = t->from_parent;
4996                 } else
4997                         BUG();
4998                 spin_unlock(&last_t->lock);
4999                 if (t)
5000                         spin_lock(&t->lock);
5001                 else
5002                         __acquire(&t->lock);
5003         }
5004         /* annotation for sparse, lock not acquired in last iteration above */
5005         __release(&t->lock);
5006
5007         /*
5008          * If this thread used poll, make sure we remove the waitqueue from any
5009          * poll data structures holding it.
5010          */
5011         if (thread->looper & BINDER_LOOPER_STATE_POLL)
5012                 wake_up_pollfree(&thread->wait);
5013
5014         binder_inner_proc_unlock(thread->proc);
5015
5016         /*
5017          * This is needed to avoid races between wake_up_pollfree() above and
5018          * someone else removing the last entry from the queue for other reasons
5019          * (e.g. ep_remove_wait_queue() being called due to an epoll file
5020          * descriptor being closed).  Such other users hold an RCU read lock, so
5021          * we can be sure they're done after we call synchronize_rcu().
5022          */
5023         if (thread->looper & BINDER_LOOPER_STATE_POLL)
5024                 synchronize_rcu();
5025
5026         if (send_reply)
5027                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5028         binder_release_work(proc, &thread->todo);
5029         binder_thread_dec_tmpref(thread);
5030         return active_transactions;
5031 }
5032
5033 static __poll_t binder_poll(struct file *filp,
5034                                 struct poll_table_struct *wait)
5035 {
5036         struct binder_proc *proc = filp->private_data;
5037         struct binder_thread *thread = NULL;
5038         bool wait_for_proc_work;
5039
5040         thread = binder_get_thread(proc);
5041         if (!thread)
5042                 return EPOLLERR;
5043
5044         binder_inner_proc_lock(thread->proc);
5045         thread->looper |= BINDER_LOOPER_STATE_POLL;
5046         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5047
5048         binder_inner_proc_unlock(thread->proc);
5049
5050         poll_wait(filp, &thread->wait, wait);
5051
5052         if (binder_has_work(thread, wait_for_proc_work))
5053                 return EPOLLIN;
5054
5055         return 0;
5056 }
5057
5058 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5059                                 struct binder_thread *thread)
5060 {
5061         int ret = 0;
5062         struct binder_proc *proc = filp->private_data;
5063         void __user *ubuf = (void __user *)arg;
5064         struct binder_write_read bwr;
5065
5066         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5067                 ret = -EFAULT;
5068                 goto out;
5069         }
5070         binder_debug(BINDER_DEBUG_READ_WRITE,
5071                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5072                      proc->pid, thread->pid,
5073                      (u64)bwr.write_size, (u64)bwr.write_buffer,
5074                      (u64)bwr.read_size, (u64)bwr.read_buffer);
5075
5076         if (bwr.write_size > 0) {
5077                 ret = binder_thread_write(proc, thread,
5078                                           bwr.write_buffer,
5079                                           bwr.write_size,
5080                                           &bwr.write_consumed);
5081                 trace_binder_write_done(ret);
5082                 if (ret < 0) {
5083                         bwr.read_consumed = 0;
5084                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5085                                 ret = -EFAULT;
5086                         goto out;
5087                 }
5088         }
5089         if (bwr.read_size > 0) {
5090                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5091                                          bwr.read_size,
5092                                          &bwr.read_consumed,
5093                                          filp->f_flags & O_NONBLOCK);
5094                 trace_binder_read_done(ret);
5095                 binder_inner_proc_lock(proc);
5096                 if (!binder_worklist_empty_ilocked(&proc->todo))
5097                         binder_wakeup_proc_ilocked(proc);
5098                 binder_inner_proc_unlock(proc);
5099                 if (ret < 0) {
5100                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5101                                 ret = -EFAULT;
5102                         goto out;
5103                 }
5104         }
5105         binder_debug(BINDER_DEBUG_READ_WRITE,
5106                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5107                      proc->pid, thread->pid,
5108                      (u64)bwr.write_consumed, (u64)bwr.write_size,
5109                      (u64)bwr.read_consumed, (u64)bwr.read_size);
5110         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5111                 ret = -EFAULT;
5112                 goto out;
5113         }
5114 out:
5115         return ret;
5116 }
5117
5118 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5119                                     struct flat_binder_object *fbo)
5120 {
5121         int ret = 0;
5122         struct binder_proc *proc = filp->private_data;
5123         struct binder_context *context = proc->context;
5124         struct binder_node *new_node;
5125         kuid_t curr_euid = current_euid();
5126
5127         mutex_lock(&context->context_mgr_node_lock);
5128         if (context->binder_context_mgr_node) {
5129                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5130                 ret = -EBUSY;
5131                 goto out;
5132         }
5133         ret = security_binder_set_context_mgr(proc->cred);
5134         if (ret < 0)
5135                 goto out;
5136         if (uid_valid(context->binder_context_mgr_uid)) {
5137                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5138                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5139                                from_kuid(&init_user_ns, curr_euid),
5140                                from_kuid(&init_user_ns,
5141                                          context->binder_context_mgr_uid));
5142                         ret = -EPERM;
5143                         goto out;
5144                 }
5145         } else {
5146                 context->binder_context_mgr_uid = curr_euid;
5147         }
5148         new_node = binder_new_node(proc, fbo);
5149         if (!new_node) {
5150                 ret = -ENOMEM;
5151                 goto out;
5152         }
5153         binder_node_lock(new_node);
5154         new_node->local_weak_refs++;
5155         new_node->local_strong_refs++;
5156         new_node->has_strong_ref = 1;
5157         new_node->has_weak_ref = 1;
5158         context->binder_context_mgr_node = new_node;
5159         binder_node_unlock(new_node);
5160         binder_put_node(new_node);
5161 out:
5162         mutex_unlock(&context->context_mgr_node_lock);
5163         return ret;
5164 }
5165
5166 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5167                 struct binder_node_info_for_ref *info)
5168 {
5169         struct binder_node *node;
5170         struct binder_context *context = proc->context;
5171         __u32 handle = info->handle;
5172
5173         if (info->strong_count || info->weak_count || info->reserved1 ||
5174             info->reserved2 || info->reserved3) {
5175                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5176                                   proc->pid);
5177                 return -EINVAL;
5178         }
5179
5180         /* This ioctl may only be used by the context manager */
5181         mutex_lock(&context->context_mgr_node_lock);
5182         if (!context->binder_context_mgr_node ||
5183                 context->binder_context_mgr_node->proc != proc) {
5184                 mutex_unlock(&context->context_mgr_node_lock);
5185                 return -EPERM;
5186         }
5187         mutex_unlock(&context->context_mgr_node_lock);
5188
5189         node = binder_get_node_from_ref(proc, handle, true, NULL);
5190         if (!node)
5191                 return -EINVAL;
5192
5193         info->strong_count = node->local_strong_refs +
5194                 node->internal_strong_refs;
5195         info->weak_count = node->local_weak_refs;
5196
5197         binder_put_node(node);
5198
5199         return 0;
5200 }
5201
5202 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5203                                 struct binder_node_debug_info *info)
5204 {
5205         struct rb_node *n;
5206         binder_uintptr_t ptr = info->ptr;
5207
5208         memset(info, 0, sizeof(*info));
5209
5210         binder_inner_proc_lock(proc);
5211         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5212                 struct binder_node *node = rb_entry(n, struct binder_node,
5213                                                     rb_node);
5214                 if (node->ptr > ptr) {
5215                         info->ptr = node->ptr;
5216                         info->cookie = node->cookie;
5217                         info->has_strong_ref = node->has_strong_ref;
5218                         info->has_weak_ref = node->has_weak_ref;
5219                         break;
5220                 }
5221         }
5222         binder_inner_proc_unlock(proc);
5223
5224         return 0;
5225 }
5226
5227 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5228 {
5229         struct rb_node *n;
5230         struct binder_thread *thread;
5231
5232         if (proc->outstanding_txns > 0)
5233                 return true;
5234
5235         for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5236                 thread = rb_entry(n, struct binder_thread, rb_node);
5237                 if (thread->transaction_stack)
5238                         return true;
5239         }
5240         return false;
5241 }
5242
5243 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5244                                struct binder_proc *target_proc)
5245 {
5246         int ret = 0;
5247
5248         if (!info->enable) {
5249                 binder_inner_proc_lock(target_proc);
5250                 target_proc->sync_recv = false;
5251                 target_proc->async_recv = false;
5252                 target_proc->is_frozen = false;
5253                 binder_inner_proc_unlock(target_proc);
5254                 return 0;
5255         }
5256
5257         /*
5258          * Freezing the target. Prevent new transactions by
5259          * setting frozen state. If timeout specified, wait
5260          * for transactions to drain.
5261          */
5262         binder_inner_proc_lock(target_proc);
5263         target_proc->sync_recv = false;
5264         target_proc->async_recv = false;
5265         target_proc->is_frozen = true;
5266         binder_inner_proc_unlock(target_proc);
5267
5268         if (info->timeout_ms > 0)
5269                 ret = wait_event_interruptible_timeout(
5270                         target_proc->freeze_wait,
5271                         (!target_proc->outstanding_txns),
5272                         msecs_to_jiffies(info->timeout_ms));
5273
5274         /* Check pending transactions that wait for reply */
5275         if (ret >= 0) {
5276                 binder_inner_proc_lock(target_proc);
5277                 if (binder_txns_pending_ilocked(target_proc))
5278                         ret = -EAGAIN;
5279                 binder_inner_proc_unlock(target_proc);
5280         }
5281
5282         if (ret < 0) {
5283                 binder_inner_proc_lock(target_proc);
5284                 target_proc->is_frozen = false;
5285                 binder_inner_proc_unlock(target_proc);
5286         }
5287
5288         return ret;
5289 }
5290
5291 static int binder_ioctl_get_freezer_info(
5292                                 struct binder_frozen_status_info *info)
5293 {
5294         struct binder_proc *target_proc;
5295         bool found = false;
5296         __u32 txns_pending;
5297
5298         info->sync_recv = 0;
5299         info->async_recv = 0;
5300
5301         mutex_lock(&binder_procs_lock);
5302         hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5303                 if (target_proc->pid == info->pid) {
5304                         found = true;
5305                         binder_inner_proc_lock(target_proc);
5306                         txns_pending = binder_txns_pending_ilocked(target_proc);
5307                         info->sync_recv |= target_proc->sync_recv |
5308                                         (txns_pending << 1);
5309                         info->async_recv |= target_proc->async_recv;
5310                         binder_inner_proc_unlock(target_proc);
5311                 }
5312         }
5313         mutex_unlock(&binder_procs_lock);
5314
5315         if (!found)
5316                 return -EINVAL;
5317
5318         return 0;
5319 }
5320
5321 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5322                                            void __user *ubuf)
5323 {
5324         struct binder_extended_error ee;
5325
5326         binder_inner_proc_lock(thread->proc);
5327         ee = thread->ee;
5328         binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5329         binder_inner_proc_unlock(thread->proc);
5330
5331         if (copy_to_user(ubuf, &ee, sizeof(ee)))
5332                 return -EFAULT;
5333
5334         return 0;
5335 }
5336
5337 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5338 {
5339         int ret;
5340         struct binder_proc *proc = filp->private_data;
5341         struct binder_thread *thread;
5342         void __user *ubuf = (void __user *)arg;
5343
5344         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5345                         proc->pid, current->pid, cmd, arg);*/
5346
5347         binder_selftest_alloc(&proc->alloc);
5348
5349         trace_binder_ioctl(cmd, arg);
5350
5351         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5352         if (ret)
5353                 goto err_unlocked;
5354
5355         thread = binder_get_thread(proc);
5356         if (thread == NULL) {
5357                 ret = -ENOMEM;
5358                 goto err;
5359         }
5360
5361         switch (cmd) {
5362         case BINDER_WRITE_READ:
5363                 ret = binder_ioctl_write_read(filp, arg, thread);
5364                 if (ret)
5365                         goto err;
5366                 break;
5367         case BINDER_SET_MAX_THREADS: {
5368                 int max_threads;
5369
5370                 if (copy_from_user(&max_threads, ubuf,
5371                                    sizeof(max_threads))) {
5372                         ret = -EINVAL;
5373                         goto err;
5374                 }
5375                 binder_inner_proc_lock(proc);
5376                 proc->max_threads = max_threads;
5377                 binder_inner_proc_unlock(proc);
5378                 break;
5379         }
5380         case BINDER_SET_CONTEXT_MGR_EXT: {
5381                 struct flat_binder_object fbo;
5382
5383                 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5384                         ret = -EINVAL;
5385                         goto err;
5386                 }
5387                 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5388                 if (ret)
5389                         goto err;
5390                 break;
5391         }
5392         case BINDER_SET_CONTEXT_MGR:
5393                 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5394                 if (ret)
5395                         goto err;
5396                 break;
5397         case BINDER_THREAD_EXIT:
5398                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5399                              proc->pid, thread->pid);
5400                 binder_thread_release(proc, thread);
5401                 thread = NULL;
5402                 break;
5403         case BINDER_VERSION: {
5404                 struct binder_version __user *ver = ubuf;
5405
5406                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5407                              &ver->protocol_version)) {
5408                         ret = -EINVAL;
5409                         goto err;
5410                 }
5411                 break;
5412         }
5413         case BINDER_GET_NODE_INFO_FOR_REF: {
5414                 struct binder_node_info_for_ref info;
5415
5416                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5417                         ret = -EFAULT;
5418                         goto err;
5419                 }
5420
5421                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5422                 if (ret < 0)
5423                         goto err;
5424
5425                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5426                         ret = -EFAULT;
5427                         goto err;
5428                 }
5429
5430                 break;
5431         }
5432         case BINDER_GET_NODE_DEBUG_INFO: {
5433                 struct binder_node_debug_info info;
5434
5435                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5436                         ret = -EFAULT;
5437                         goto err;
5438                 }
5439
5440                 ret = binder_ioctl_get_node_debug_info(proc, &info);
5441                 if (ret < 0)
5442                         goto err;
5443
5444                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5445                         ret = -EFAULT;
5446                         goto err;
5447                 }
5448                 break;
5449         }
5450         case BINDER_FREEZE: {
5451                 struct binder_freeze_info info;
5452                 struct binder_proc **target_procs = NULL, *target_proc;
5453                 int target_procs_count = 0, i = 0;
5454
5455                 ret = 0;
5456
5457                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5458                         ret = -EFAULT;
5459                         goto err;
5460                 }
5461
5462                 mutex_lock(&binder_procs_lock);
5463                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5464                         if (target_proc->pid == info.pid)
5465                                 target_procs_count++;
5466                 }
5467
5468                 if (target_procs_count == 0) {
5469                         mutex_unlock(&binder_procs_lock);
5470                         ret = -EINVAL;
5471                         goto err;
5472                 }
5473
5474                 target_procs = kcalloc(target_procs_count,
5475                                        sizeof(struct binder_proc *),
5476                                        GFP_KERNEL);
5477
5478                 if (!target_procs) {
5479                         mutex_unlock(&binder_procs_lock);
5480                         ret = -ENOMEM;
5481                         goto err;
5482                 }
5483
5484                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5485                         if (target_proc->pid != info.pid)
5486                                 continue;
5487
5488                         binder_inner_proc_lock(target_proc);
5489                         target_proc->tmp_ref++;
5490                         binder_inner_proc_unlock(target_proc);
5491
5492                         target_procs[i++] = target_proc;
5493                 }
5494                 mutex_unlock(&binder_procs_lock);
5495
5496                 for (i = 0; i < target_procs_count; i++) {
5497                         if (ret >= 0)
5498                                 ret = binder_ioctl_freeze(&info,
5499                                                           target_procs[i]);
5500
5501                         binder_proc_dec_tmpref(target_procs[i]);
5502                 }
5503
5504                 kfree(target_procs);
5505
5506                 if (ret < 0)
5507                         goto err;
5508                 break;
5509         }
5510         case BINDER_GET_FROZEN_INFO: {
5511                 struct binder_frozen_status_info info;
5512
5513                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5514                         ret = -EFAULT;
5515                         goto err;
5516                 }
5517
5518                 ret = binder_ioctl_get_freezer_info(&info);
5519                 if (ret < 0)
5520                         goto err;
5521
5522                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5523                         ret = -EFAULT;
5524                         goto err;
5525                 }
5526                 break;
5527         }
5528         case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5529                 uint32_t enable;
5530
5531                 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5532                         ret = -EFAULT;
5533                         goto err;
5534                 }
5535                 binder_inner_proc_lock(proc);
5536                 proc->oneway_spam_detection_enabled = (bool)enable;
5537                 binder_inner_proc_unlock(proc);
5538                 break;
5539         }
5540         case BINDER_GET_EXTENDED_ERROR:
5541                 ret = binder_ioctl_get_extended_error(thread, ubuf);
5542                 if (ret < 0)
5543                         goto err;
5544                 break;
5545         default:
5546                 ret = -EINVAL;
5547                 goto err;
5548         }
5549         ret = 0;
5550 err:
5551         if (thread)
5552                 thread->looper_need_return = false;
5553         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5554         if (ret && ret != -EINTR)
5555                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5556 err_unlocked:
5557         trace_binder_ioctl_done(ret);
5558         return ret;
5559 }
5560
5561 static void binder_vma_open(struct vm_area_struct *vma)
5562 {
5563         struct binder_proc *proc = vma->vm_private_data;
5564
5565         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5566                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5567                      proc->pid, vma->vm_start, vma->vm_end,
5568                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5569                      (unsigned long)pgprot_val(vma->vm_page_prot));
5570 }
5571
5572 static void binder_vma_close(struct vm_area_struct *vma)
5573 {
5574         struct binder_proc *proc = vma->vm_private_data;
5575
5576         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5577                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5578                      proc->pid, vma->vm_start, vma->vm_end,
5579                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5580                      (unsigned long)pgprot_val(vma->vm_page_prot));
5581         binder_alloc_vma_close(&proc->alloc);
5582 }
5583
5584 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5585 {
5586         return VM_FAULT_SIGBUS;
5587 }
5588
5589 static const struct vm_operations_struct binder_vm_ops = {
5590         .open = binder_vma_open,
5591         .close = binder_vma_close,
5592         .fault = binder_vm_fault,
5593 };
5594
5595 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5596 {
5597         struct binder_proc *proc = filp->private_data;
5598
5599         if (proc->tsk != current->group_leader)
5600                 return -EINVAL;
5601
5602         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5603                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5604                      __func__, proc->pid, vma->vm_start, vma->vm_end,
5605                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5606                      (unsigned long)pgprot_val(vma->vm_page_prot));
5607
5608         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5609                 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5610                        proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5611                 return -EPERM;
5612         }
5613         vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5614
5615         vma->vm_ops = &binder_vm_ops;
5616         vma->vm_private_data = proc;
5617
5618         return binder_alloc_mmap_handler(&proc->alloc, vma);
5619 }
5620
5621 static int binder_open(struct inode *nodp, struct file *filp)
5622 {
5623         struct binder_proc *proc, *itr;
5624         struct binder_device *binder_dev;
5625         struct binderfs_info *info;
5626         struct dentry *binder_binderfs_dir_entry_proc = NULL;
5627         bool existing_pid = false;
5628
5629         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5630                      current->group_leader->pid, current->pid);
5631
5632         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5633         if (proc == NULL)
5634                 return -ENOMEM;
5635         spin_lock_init(&proc->inner_lock);
5636         spin_lock_init(&proc->outer_lock);
5637         get_task_struct(current->group_leader);
5638         proc->tsk = current->group_leader;
5639         proc->cred = get_cred(filp->f_cred);
5640         INIT_LIST_HEAD(&proc->todo);
5641         init_waitqueue_head(&proc->freeze_wait);
5642         proc->default_priority = task_nice(current);
5643         /* binderfs stashes devices in i_private */
5644         if (is_binderfs_device(nodp)) {
5645                 binder_dev = nodp->i_private;
5646                 info = nodp->i_sb->s_fs_info;
5647                 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5648         } else {
5649                 binder_dev = container_of(filp->private_data,
5650                                           struct binder_device, miscdev);
5651         }
5652         refcount_inc(&binder_dev->ref);
5653         proc->context = &binder_dev->context;
5654         binder_alloc_init(&proc->alloc);
5655
5656         binder_stats_created(BINDER_STAT_PROC);
5657         proc->pid = current->group_leader->pid;
5658         INIT_LIST_HEAD(&proc->delivered_death);
5659         INIT_LIST_HEAD(&proc->waiting_threads);
5660         filp->private_data = proc;
5661
5662         mutex_lock(&binder_procs_lock);
5663         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5664                 if (itr->pid == proc->pid) {
5665                         existing_pid = true;
5666                         break;
5667                 }
5668         }
5669         hlist_add_head(&proc->proc_node, &binder_procs);
5670         mutex_unlock(&binder_procs_lock);
5671
5672         if (binder_debugfs_dir_entry_proc && !existing_pid) {
5673                 char strbuf[11];
5674
5675                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5676                 /*
5677                  * proc debug entries are shared between contexts.
5678                  * Only create for the first PID to avoid debugfs log spamming
5679                  * The printing code will anyway print all contexts for a given
5680                  * PID so this is not a problem.
5681                  */
5682                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5683                         binder_debugfs_dir_entry_proc,
5684                         (void *)(unsigned long)proc->pid,
5685                         &proc_fops);
5686         }
5687
5688         if (binder_binderfs_dir_entry_proc && !existing_pid) {
5689                 char strbuf[11];
5690                 struct dentry *binderfs_entry;
5691
5692                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5693                 /*
5694                  * Similar to debugfs, the process specific log file is shared
5695                  * between contexts. Only create for the first PID.
5696                  * This is ok since same as debugfs, the log file will contain
5697                  * information on all contexts of a given PID.
5698                  */
5699                 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5700                         strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5701                 if (!IS_ERR(binderfs_entry)) {
5702                         proc->binderfs_entry = binderfs_entry;
5703                 } else {
5704                         int error;
5705
5706                         error = PTR_ERR(binderfs_entry);
5707                         pr_warn("Unable to create file %s in binderfs (error %d)\n",
5708                                 strbuf, error);
5709                 }
5710         }
5711
5712         return 0;
5713 }
5714
5715 static int binder_flush(struct file *filp, fl_owner_t id)
5716 {
5717         struct binder_proc *proc = filp->private_data;
5718
5719         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5720
5721         return 0;
5722 }
5723
5724 static void binder_deferred_flush(struct binder_proc *proc)
5725 {
5726         struct rb_node *n;
5727         int wake_count = 0;
5728
5729         binder_inner_proc_lock(proc);
5730         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5731                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5732
5733                 thread->looper_need_return = true;
5734                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5735                         wake_up_interruptible(&thread->wait);
5736                         wake_count++;
5737                 }
5738         }
5739         binder_inner_proc_unlock(proc);
5740
5741         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5742                      "binder_flush: %d woke %d threads\n", proc->pid,
5743                      wake_count);
5744 }
5745
5746 static int binder_release(struct inode *nodp, struct file *filp)
5747 {
5748         struct binder_proc *proc = filp->private_data;
5749
5750         debugfs_remove(proc->debugfs_entry);
5751
5752         if (proc->binderfs_entry) {
5753                 binderfs_remove_file(proc->binderfs_entry);
5754                 proc->binderfs_entry = NULL;
5755         }
5756
5757         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5758
5759         return 0;
5760 }
5761
5762 static int binder_node_release(struct binder_node *node, int refs)
5763 {
5764         struct binder_ref *ref;
5765         int death = 0;
5766         struct binder_proc *proc = node->proc;
5767
5768         binder_release_work(proc, &node->async_todo);
5769
5770         binder_node_lock(node);
5771         binder_inner_proc_lock(proc);
5772         binder_dequeue_work_ilocked(&node->work);
5773         /*
5774          * The caller must have taken a temporary ref on the node,
5775          */
5776         BUG_ON(!node->tmp_refs);
5777         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5778                 binder_inner_proc_unlock(proc);
5779                 binder_node_unlock(node);
5780                 binder_free_node(node);
5781
5782                 return refs;
5783         }
5784
5785         node->proc = NULL;
5786         node->local_strong_refs = 0;
5787         node->local_weak_refs = 0;
5788         binder_inner_proc_unlock(proc);
5789
5790         spin_lock(&binder_dead_nodes_lock);
5791         hlist_add_head(&node->dead_node, &binder_dead_nodes);
5792         spin_unlock(&binder_dead_nodes_lock);
5793
5794         hlist_for_each_entry(ref, &node->refs, node_entry) {
5795                 refs++;
5796                 /*
5797                  * Need the node lock to synchronize
5798                  * with new notification requests and the
5799                  * inner lock to synchronize with queued
5800                  * death notifications.
5801                  */
5802                 binder_inner_proc_lock(ref->proc);
5803                 if (!ref->death) {
5804                         binder_inner_proc_unlock(ref->proc);
5805                         continue;
5806                 }
5807
5808                 death++;
5809
5810                 BUG_ON(!list_empty(&ref->death->work.entry));
5811                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5812                 binder_enqueue_work_ilocked(&ref->death->work,
5813                                             &ref->proc->todo);
5814                 binder_wakeup_proc_ilocked(ref->proc);
5815                 binder_inner_proc_unlock(ref->proc);
5816         }
5817
5818         binder_debug(BINDER_DEBUG_DEAD_BINDER,
5819                      "node %d now dead, refs %d, death %d\n",
5820                      node->debug_id, refs, death);
5821         binder_node_unlock(node);
5822         binder_put_node(node);
5823
5824         return refs;
5825 }
5826
5827 static void binder_deferred_release(struct binder_proc *proc)
5828 {
5829         struct binder_context *context = proc->context;
5830         struct rb_node *n;
5831         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5832
5833         mutex_lock(&binder_procs_lock);
5834         hlist_del(&proc->proc_node);
5835         mutex_unlock(&binder_procs_lock);
5836
5837         mutex_lock(&context->context_mgr_node_lock);
5838         if (context->binder_context_mgr_node &&
5839             context->binder_context_mgr_node->proc == proc) {
5840                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5841                              "%s: %d context_mgr_node gone\n",
5842                              __func__, proc->pid);
5843                 context->binder_context_mgr_node = NULL;
5844         }
5845         mutex_unlock(&context->context_mgr_node_lock);
5846         binder_inner_proc_lock(proc);
5847         /*
5848          * Make sure proc stays alive after we
5849          * remove all the threads
5850          */
5851         proc->tmp_ref++;
5852
5853         proc->is_dead = true;
5854         proc->is_frozen = false;
5855         proc->sync_recv = false;
5856         proc->async_recv = false;
5857         threads = 0;
5858         active_transactions = 0;
5859         while ((n = rb_first(&proc->threads))) {
5860                 struct binder_thread *thread;
5861
5862                 thread = rb_entry(n, struct binder_thread, rb_node);
5863                 binder_inner_proc_unlock(proc);
5864                 threads++;
5865                 active_transactions += binder_thread_release(proc, thread);
5866                 binder_inner_proc_lock(proc);
5867         }
5868
5869         nodes = 0;
5870         incoming_refs = 0;
5871         while ((n = rb_first(&proc->nodes))) {
5872                 struct binder_node *node;
5873
5874                 node = rb_entry(n, struct binder_node, rb_node);
5875                 nodes++;
5876                 /*
5877                  * take a temporary ref on the node before
5878                  * calling binder_node_release() which will either
5879                  * kfree() the node or call binder_put_node()
5880                  */
5881                 binder_inc_node_tmpref_ilocked(node);
5882                 rb_erase(&node->rb_node, &proc->nodes);
5883                 binder_inner_proc_unlock(proc);
5884                 incoming_refs = binder_node_release(node, incoming_refs);
5885                 binder_inner_proc_lock(proc);
5886         }
5887         binder_inner_proc_unlock(proc);
5888
5889         outgoing_refs = 0;
5890         binder_proc_lock(proc);
5891         while ((n = rb_first(&proc->refs_by_desc))) {
5892                 struct binder_ref *ref;
5893
5894                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5895                 outgoing_refs++;
5896                 binder_cleanup_ref_olocked(ref);
5897                 binder_proc_unlock(proc);
5898                 binder_free_ref(ref);
5899                 binder_proc_lock(proc);
5900         }
5901         binder_proc_unlock(proc);
5902
5903         binder_release_work(proc, &proc->todo);
5904         binder_release_work(proc, &proc->delivered_death);
5905
5906         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5907                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5908                      __func__, proc->pid, threads, nodes, incoming_refs,
5909                      outgoing_refs, active_transactions);
5910
5911         binder_proc_dec_tmpref(proc);
5912 }
5913
5914 static void binder_deferred_func(struct work_struct *work)
5915 {
5916         struct binder_proc *proc;
5917
5918         int defer;
5919
5920         do {
5921                 mutex_lock(&binder_deferred_lock);
5922                 if (!hlist_empty(&binder_deferred_list)) {
5923                         proc = hlist_entry(binder_deferred_list.first,
5924                                         struct binder_proc, deferred_work_node);
5925                         hlist_del_init(&proc->deferred_work_node);
5926                         defer = proc->deferred_work;
5927                         proc->deferred_work = 0;
5928                 } else {
5929                         proc = NULL;
5930                         defer = 0;
5931                 }
5932                 mutex_unlock(&binder_deferred_lock);
5933
5934                 if (defer & BINDER_DEFERRED_FLUSH)
5935                         binder_deferred_flush(proc);
5936
5937                 if (defer & BINDER_DEFERRED_RELEASE)
5938                         binder_deferred_release(proc); /* frees proc */
5939         } while (proc);
5940 }
5941 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5942
5943 static void
5944 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5945 {
5946         mutex_lock(&binder_deferred_lock);
5947         proc->deferred_work |= defer;
5948         if (hlist_unhashed(&proc->deferred_work_node)) {
5949                 hlist_add_head(&proc->deferred_work_node,
5950                                 &binder_deferred_list);
5951                 schedule_work(&binder_deferred_work);
5952         }
5953         mutex_unlock(&binder_deferred_lock);
5954 }
5955
5956 static void print_binder_transaction_ilocked(struct seq_file *m,
5957                                              struct binder_proc *proc,
5958                                              const char *prefix,
5959                                              struct binder_transaction *t)
5960 {
5961         struct binder_proc *to_proc;
5962         struct binder_buffer *buffer = t->buffer;
5963         ktime_t current_time = ktime_get();
5964
5965         spin_lock(&t->lock);
5966         to_proc = t->to_proc;
5967         seq_printf(m,
5968                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5969                    prefix, t->debug_id, t,
5970                    t->from_pid,
5971                    t->from_tid,
5972                    to_proc ? to_proc->pid : 0,
5973                    t->to_thread ? t->to_thread->pid : 0,
5974                    t->code, t->flags, t->priority, t->need_reply,
5975                    ktime_ms_delta(current_time, t->start_time));
5976         spin_unlock(&t->lock);
5977
5978         if (proc != to_proc) {
5979                 /*
5980                  * Can only safely deref buffer if we are holding the
5981                  * correct proc inner lock for this node
5982                  */
5983                 seq_puts(m, "\n");
5984                 return;
5985         }
5986
5987         if (buffer == NULL) {
5988                 seq_puts(m, " buffer free\n");
5989                 return;
5990         }
5991         if (buffer->target_node)
5992                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5993         seq_printf(m, " size %zd:%zd offset %lx\n",
5994                    buffer->data_size, buffer->offsets_size,
5995                    proc->alloc.buffer - buffer->user_data);
5996 }
5997
5998 static void print_binder_work_ilocked(struct seq_file *m,
5999                                      struct binder_proc *proc,
6000                                      const char *prefix,
6001                                      const char *transaction_prefix,
6002                                      struct binder_work *w)
6003 {
6004         struct binder_node *node;
6005         struct binder_transaction *t;
6006
6007         switch (w->type) {
6008         case BINDER_WORK_TRANSACTION:
6009                 t = container_of(w, struct binder_transaction, work);
6010                 print_binder_transaction_ilocked(
6011                                 m, proc, transaction_prefix, t);
6012                 break;
6013         case BINDER_WORK_RETURN_ERROR: {
6014                 struct binder_error *e = container_of(
6015                                 w, struct binder_error, work);
6016
6017                 seq_printf(m, "%stransaction error: %u\n",
6018                            prefix, e->cmd);
6019         } break;
6020         case BINDER_WORK_TRANSACTION_COMPLETE:
6021                 seq_printf(m, "%stransaction complete\n", prefix);
6022                 break;
6023         case BINDER_WORK_NODE:
6024                 node = container_of(w, struct binder_node, work);
6025                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6026                            prefix, node->debug_id,
6027                            (u64)node->ptr, (u64)node->cookie);
6028                 break;
6029         case BINDER_WORK_DEAD_BINDER:
6030                 seq_printf(m, "%shas dead binder\n", prefix);
6031                 break;
6032         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6033                 seq_printf(m, "%shas cleared dead binder\n", prefix);
6034                 break;
6035         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6036                 seq_printf(m, "%shas cleared death notification\n", prefix);
6037                 break;
6038         default:
6039                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6040                 break;
6041         }
6042 }
6043
6044 static void print_binder_thread_ilocked(struct seq_file *m,
6045                                         struct binder_thread *thread,
6046                                         int print_always)
6047 {
6048         struct binder_transaction *t;
6049         struct binder_work *w;
6050         size_t start_pos = m->count;
6051         size_t header_pos;
6052
6053         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6054                         thread->pid, thread->looper,
6055                         thread->looper_need_return,
6056                         atomic_read(&thread->tmp_ref));
6057         header_pos = m->count;
6058         t = thread->transaction_stack;
6059         while (t) {
6060                 if (t->from == thread) {
6061                         print_binder_transaction_ilocked(m, thread->proc,
6062                                         "    outgoing transaction", t);
6063                         t = t->from_parent;
6064                 } else if (t->to_thread == thread) {
6065                         print_binder_transaction_ilocked(m, thread->proc,
6066                                                  "    incoming transaction", t);
6067                         t = t->to_parent;
6068                 } else {
6069                         print_binder_transaction_ilocked(m, thread->proc,
6070                                         "    bad transaction", t);
6071                         t = NULL;
6072                 }
6073         }
6074         list_for_each_entry(w, &thread->todo, entry) {
6075                 print_binder_work_ilocked(m, thread->proc, "    ",
6076                                           "    pending transaction", w);
6077         }
6078         if (!print_always && m->count == header_pos)
6079                 m->count = start_pos;
6080 }
6081
6082 static void print_binder_node_nilocked(struct seq_file *m,
6083                                        struct binder_node *node)
6084 {
6085         struct binder_ref *ref;
6086         struct binder_work *w;
6087         int count;
6088
6089         count = hlist_count_nodes(&node->refs);
6090
6091         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6092                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
6093                    node->has_strong_ref, node->has_weak_ref,
6094                    node->local_strong_refs, node->local_weak_refs,
6095                    node->internal_strong_refs, count, node->tmp_refs);
6096         if (count) {
6097                 seq_puts(m, " proc");
6098                 hlist_for_each_entry(ref, &node->refs, node_entry)
6099                         seq_printf(m, " %d", ref->proc->pid);
6100         }
6101         seq_puts(m, "\n");
6102         if (node->proc) {
6103                 list_for_each_entry(w, &node->async_todo, entry)
6104                         print_binder_work_ilocked(m, node->proc, "    ",
6105                                           "    pending async transaction", w);
6106         }
6107 }
6108
6109 static void print_binder_ref_olocked(struct seq_file *m,
6110                                      struct binder_ref *ref)
6111 {
6112         binder_node_lock(ref->node);
6113         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6114                    ref->data.debug_id, ref->data.desc,
6115                    ref->node->proc ? "" : "dead ",
6116                    ref->node->debug_id, ref->data.strong,
6117                    ref->data.weak, ref->death);
6118         binder_node_unlock(ref->node);
6119 }
6120
6121 static void print_binder_proc(struct seq_file *m,
6122                               struct binder_proc *proc, int print_all)
6123 {
6124         struct binder_work *w;
6125         struct rb_node *n;
6126         size_t start_pos = m->count;
6127         size_t header_pos;
6128         struct binder_node *last_node = NULL;
6129
6130         seq_printf(m, "proc %d\n", proc->pid);
6131         seq_printf(m, "context %s\n", proc->context->name);
6132         header_pos = m->count;
6133
6134         binder_inner_proc_lock(proc);
6135         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6136                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6137                                                 rb_node), print_all);
6138
6139         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6140                 struct binder_node *node = rb_entry(n, struct binder_node,
6141                                                     rb_node);
6142                 if (!print_all && !node->has_async_transaction)
6143                         continue;
6144
6145                 /*
6146                  * take a temporary reference on the node so it
6147                  * survives and isn't removed from the tree
6148                  * while we print it.
6149                  */
6150                 binder_inc_node_tmpref_ilocked(node);
6151                 /* Need to drop inner lock to take node lock */
6152                 binder_inner_proc_unlock(proc);
6153                 if (last_node)
6154                         binder_put_node(last_node);
6155                 binder_node_inner_lock(node);
6156                 print_binder_node_nilocked(m, node);
6157                 binder_node_inner_unlock(node);
6158                 last_node = node;
6159                 binder_inner_proc_lock(proc);
6160         }
6161         binder_inner_proc_unlock(proc);
6162         if (last_node)
6163                 binder_put_node(last_node);
6164
6165         if (print_all) {
6166                 binder_proc_lock(proc);
6167                 for (n = rb_first(&proc->refs_by_desc);
6168                      n != NULL;
6169                      n = rb_next(n))
6170                         print_binder_ref_olocked(m, rb_entry(n,
6171                                                             struct binder_ref,
6172                                                             rb_node_desc));
6173                 binder_proc_unlock(proc);
6174         }
6175         binder_alloc_print_allocated(m, &proc->alloc);
6176         binder_inner_proc_lock(proc);
6177         list_for_each_entry(w, &proc->todo, entry)
6178                 print_binder_work_ilocked(m, proc, "  ",
6179                                           "  pending transaction", w);
6180         list_for_each_entry(w, &proc->delivered_death, entry) {
6181                 seq_puts(m, "  has delivered dead binder\n");
6182                 break;
6183         }
6184         binder_inner_proc_unlock(proc);
6185         if (!print_all && m->count == header_pos)
6186                 m->count = start_pos;
6187 }
6188
6189 static const char * const binder_return_strings[] = {
6190         "BR_ERROR",
6191         "BR_OK",
6192         "BR_TRANSACTION",
6193         "BR_REPLY",
6194         "BR_ACQUIRE_RESULT",
6195         "BR_DEAD_REPLY",
6196         "BR_TRANSACTION_COMPLETE",
6197         "BR_INCREFS",
6198         "BR_ACQUIRE",
6199         "BR_RELEASE",
6200         "BR_DECREFS",
6201         "BR_ATTEMPT_ACQUIRE",
6202         "BR_NOOP",
6203         "BR_SPAWN_LOOPER",
6204         "BR_FINISHED",
6205         "BR_DEAD_BINDER",
6206         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6207         "BR_FAILED_REPLY",
6208         "BR_FROZEN_REPLY",
6209         "BR_ONEWAY_SPAM_SUSPECT",
6210         "BR_TRANSACTION_PENDING_FROZEN"
6211 };
6212
6213 static const char * const binder_command_strings[] = {
6214         "BC_TRANSACTION",
6215         "BC_REPLY",
6216         "BC_ACQUIRE_RESULT",
6217         "BC_FREE_BUFFER",
6218         "BC_INCREFS",
6219         "BC_ACQUIRE",
6220         "BC_RELEASE",
6221         "BC_DECREFS",
6222         "BC_INCREFS_DONE",
6223         "BC_ACQUIRE_DONE",
6224         "BC_ATTEMPT_ACQUIRE",
6225         "BC_REGISTER_LOOPER",
6226         "BC_ENTER_LOOPER",
6227         "BC_EXIT_LOOPER",
6228         "BC_REQUEST_DEATH_NOTIFICATION",
6229         "BC_CLEAR_DEATH_NOTIFICATION",
6230         "BC_DEAD_BINDER_DONE",
6231         "BC_TRANSACTION_SG",
6232         "BC_REPLY_SG",
6233 };
6234
6235 static const char * const binder_objstat_strings[] = {
6236         "proc",
6237         "thread",
6238         "node",
6239         "ref",
6240         "death",
6241         "transaction",
6242         "transaction_complete"
6243 };
6244
6245 static void print_binder_stats(struct seq_file *m, const char *prefix,
6246                                struct binder_stats *stats)
6247 {
6248         int i;
6249
6250         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6251                      ARRAY_SIZE(binder_command_strings));
6252         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6253                 int temp = atomic_read(&stats->bc[i]);
6254
6255                 if (temp)
6256                         seq_printf(m, "%s%s: %d\n", prefix,
6257                                    binder_command_strings[i], temp);
6258         }
6259
6260         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6261                      ARRAY_SIZE(binder_return_strings));
6262         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6263                 int temp = atomic_read(&stats->br[i]);
6264
6265                 if (temp)
6266                         seq_printf(m, "%s%s: %d\n", prefix,
6267                                    binder_return_strings[i], temp);
6268         }
6269
6270         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6271                      ARRAY_SIZE(binder_objstat_strings));
6272         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6273                      ARRAY_SIZE(stats->obj_deleted));
6274         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6275                 int created = atomic_read(&stats->obj_created[i]);
6276                 int deleted = atomic_read(&stats->obj_deleted[i]);
6277
6278                 if (created || deleted)
6279                         seq_printf(m, "%s%s: active %d total %d\n",
6280                                 prefix,
6281                                 binder_objstat_strings[i],
6282                                 created - deleted,
6283                                 created);
6284         }
6285 }
6286
6287 static void print_binder_proc_stats(struct seq_file *m,
6288                                     struct binder_proc *proc)
6289 {
6290         struct binder_work *w;
6291         struct binder_thread *thread;
6292         struct rb_node *n;
6293         int count, strong, weak, ready_threads;
6294         size_t free_async_space =
6295                 binder_alloc_get_free_async_space(&proc->alloc);
6296
6297         seq_printf(m, "proc %d\n", proc->pid);
6298         seq_printf(m, "context %s\n", proc->context->name);
6299         count = 0;
6300         ready_threads = 0;
6301         binder_inner_proc_lock(proc);
6302         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6303                 count++;
6304
6305         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6306                 ready_threads++;
6307
6308         seq_printf(m, "  threads: %d\n", count);
6309         seq_printf(m, "  requested threads: %d+%d/%d\n"
6310                         "  ready threads %d\n"
6311                         "  free async space %zd\n", proc->requested_threads,
6312                         proc->requested_threads_started, proc->max_threads,
6313                         ready_threads,
6314                         free_async_space);
6315         count = 0;
6316         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6317                 count++;
6318         binder_inner_proc_unlock(proc);
6319         seq_printf(m, "  nodes: %d\n", count);
6320         count = 0;
6321         strong = 0;
6322         weak = 0;
6323         binder_proc_lock(proc);
6324         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6325                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6326                                                   rb_node_desc);
6327                 count++;
6328                 strong += ref->data.strong;
6329                 weak += ref->data.weak;
6330         }
6331         binder_proc_unlock(proc);
6332         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6333
6334         count = binder_alloc_get_allocated_count(&proc->alloc);
6335         seq_printf(m, "  buffers: %d\n", count);
6336
6337         binder_alloc_print_pages(m, &proc->alloc);
6338
6339         count = 0;
6340         binder_inner_proc_lock(proc);
6341         list_for_each_entry(w, &proc->todo, entry) {
6342                 if (w->type == BINDER_WORK_TRANSACTION)
6343                         count++;
6344         }
6345         binder_inner_proc_unlock(proc);
6346         seq_printf(m, "  pending transactions: %d\n", count);
6347
6348         print_binder_stats(m, "  ", &proc->stats);
6349 }
6350
6351 static int state_show(struct seq_file *m, void *unused)
6352 {
6353         struct binder_proc *proc;
6354         struct binder_node *node;
6355         struct binder_node *last_node = NULL;
6356
6357         seq_puts(m, "binder state:\n");
6358
6359         spin_lock(&binder_dead_nodes_lock);
6360         if (!hlist_empty(&binder_dead_nodes))
6361                 seq_puts(m, "dead nodes:\n");
6362         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6363                 /*
6364                  * take a temporary reference on the node so it
6365                  * survives and isn't removed from the list
6366                  * while we print it.
6367                  */
6368                 node->tmp_refs++;
6369                 spin_unlock(&binder_dead_nodes_lock);
6370                 if (last_node)
6371                         binder_put_node(last_node);
6372                 binder_node_lock(node);
6373                 print_binder_node_nilocked(m, node);
6374                 binder_node_unlock(node);
6375                 last_node = node;
6376                 spin_lock(&binder_dead_nodes_lock);
6377         }
6378         spin_unlock(&binder_dead_nodes_lock);
6379         if (last_node)
6380                 binder_put_node(last_node);
6381
6382         mutex_lock(&binder_procs_lock);
6383         hlist_for_each_entry(proc, &binder_procs, proc_node)
6384                 print_binder_proc(m, proc, 1);
6385         mutex_unlock(&binder_procs_lock);
6386
6387         return 0;
6388 }
6389
6390 static int stats_show(struct seq_file *m, void *unused)
6391 {
6392         struct binder_proc *proc;
6393
6394         seq_puts(m, "binder stats:\n");
6395
6396         print_binder_stats(m, "", &binder_stats);
6397
6398         mutex_lock(&binder_procs_lock);
6399         hlist_for_each_entry(proc, &binder_procs, proc_node)
6400                 print_binder_proc_stats(m, proc);
6401         mutex_unlock(&binder_procs_lock);
6402
6403         return 0;
6404 }
6405
6406 static int transactions_show(struct seq_file *m, void *unused)
6407 {
6408         struct binder_proc *proc;
6409
6410         seq_puts(m, "binder transactions:\n");
6411         mutex_lock(&binder_procs_lock);
6412         hlist_for_each_entry(proc, &binder_procs, proc_node)
6413                 print_binder_proc(m, proc, 0);
6414         mutex_unlock(&binder_procs_lock);
6415
6416         return 0;
6417 }
6418
6419 static int proc_show(struct seq_file *m, void *unused)
6420 {
6421         struct binder_proc *itr;
6422         int pid = (unsigned long)m->private;
6423
6424         mutex_lock(&binder_procs_lock);
6425         hlist_for_each_entry(itr, &binder_procs, proc_node) {
6426                 if (itr->pid == pid) {
6427                         seq_puts(m, "binder proc state:\n");
6428                         print_binder_proc(m, itr, 1);
6429                 }
6430         }
6431         mutex_unlock(&binder_procs_lock);
6432
6433         return 0;
6434 }
6435
6436 static void print_binder_transaction_log_entry(struct seq_file *m,
6437                                         struct binder_transaction_log_entry *e)
6438 {
6439         int debug_id = READ_ONCE(e->debug_id_done);
6440         /*
6441          * read barrier to guarantee debug_id_done read before
6442          * we print the log values
6443          */
6444         smp_rmb();
6445         seq_printf(m,
6446                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6447                    e->debug_id, (e->call_type == 2) ? "reply" :
6448                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6449                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
6450                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
6451                    e->return_error, e->return_error_param,
6452                    e->return_error_line);
6453         /*
6454          * read-barrier to guarantee read of debug_id_done after
6455          * done printing the fields of the entry
6456          */
6457         smp_rmb();
6458         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6459                         "\n" : " (incomplete)\n");
6460 }
6461
6462 static int transaction_log_show(struct seq_file *m, void *unused)
6463 {
6464         struct binder_transaction_log *log = m->private;
6465         unsigned int log_cur = atomic_read(&log->cur);
6466         unsigned int count;
6467         unsigned int cur;
6468         int i;
6469
6470         count = log_cur + 1;
6471         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6472                 0 : count % ARRAY_SIZE(log->entry);
6473         if (count > ARRAY_SIZE(log->entry) || log->full)
6474                 count = ARRAY_SIZE(log->entry);
6475         for (i = 0; i < count; i++) {
6476                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6477
6478                 print_binder_transaction_log_entry(m, &log->entry[index]);
6479         }
6480         return 0;
6481 }
6482
6483 const struct file_operations binder_fops = {
6484         .owner = THIS_MODULE,
6485         .poll = binder_poll,
6486         .unlocked_ioctl = binder_ioctl,
6487         .compat_ioctl = compat_ptr_ioctl,
6488         .mmap = binder_mmap,
6489         .open = binder_open,
6490         .flush = binder_flush,
6491         .release = binder_release,
6492 };
6493
6494 DEFINE_SHOW_ATTRIBUTE(state);
6495 DEFINE_SHOW_ATTRIBUTE(stats);
6496 DEFINE_SHOW_ATTRIBUTE(transactions);
6497 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6498
6499 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6500         {
6501                 .name = "state",
6502                 .mode = 0444,
6503                 .fops = &state_fops,
6504                 .data = NULL,
6505         },
6506         {
6507                 .name = "stats",
6508                 .mode = 0444,
6509                 .fops = &stats_fops,
6510                 .data = NULL,
6511         },
6512         {
6513                 .name = "transactions",
6514                 .mode = 0444,
6515                 .fops = &transactions_fops,
6516                 .data = NULL,
6517         },
6518         {
6519                 .name = "transaction_log",
6520                 .mode = 0444,
6521                 .fops = &transaction_log_fops,
6522                 .data = &binder_transaction_log,
6523         },
6524         {
6525                 .name = "failed_transaction_log",
6526                 .mode = 0444,
6527                 .fops = &transaction_log_fops,
6528                 .data = &binder_transaction_log_failed,
6529         },
6530         {} /* terminator */
6531 };
6532
6533 static int __init init_binder_device(const char *name)
6534 {
6535         int ret;
6536         struct binder_device *binder_device;
6537
6538         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6539         if (!binder_device)
6540                 return -ENOMEM;
6541
6542         binder_device->miscdev.fops = &binder_fops;
6543         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6544         binder_device->miscdev.name = name;
6545
6546         refcount_set(&binder_device->ref, 1);
6547         binder_device->context.binder_context_mgr_uid = INVALID_UID;
6548         binder_device->context.name = name;
6549         mutex_init(&binder_device->context.context_mgr_node_lock);
6550
6551         ret = misc_register(&binder_device->miscdev);
6552         if (ret < 0) {
6553                 kfree(binder_device);
6554                 return ret;
6555         }
6556
6557         hlist_add_head(&binder_device->hlist, &binder_devices);
6558
6559         return ret;
6560 }
6561
6562 static int __init binder_init(void)
6563 {
6564         int ret;
6565         char *device_name, *device_tmp;
6566         struct binder_device *device;
6567         struct hlist_node *tmp;
6568         char *device_names = NULL;
6569         const struct binder_debugfs_entry *db_entry;
6570
6571         ret = binder_alloc_shrinker_init();
6572         if (ret)
6573                 return ret;
6574
6575         atomic_set(&binder_transaction_log.cur, ~0U);
6576         atomic_set(&binder_transaction_log_failed.cur, ~0U);
6577
6578         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6579
6580         binder_for_each_debugfs_entry(db_entry)
6581                 debugfs_create_file(db_entry->name,
6582                                         db_entry->mode,
6583                                         binder_debugfs_dir_entry_root,
6584                                         db_entry->data,
6585                                         db_entry->fops);
6586
6587         binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6588                                                 binder_debugfs_dir_entry_root);
6589
6590         if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6591             strcmp(binder_devices_param, "") != 0) {
6592                 /*
6593                 * Copy the module_parameter string, because we don't want to
6594                 * tokenize it in-place.
6595                  */
6596                 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6597                 if (!device_names) {
6598                         ret = -ENOMEM;
6599                         goto err_alloc_device_names_failed;
6600                 }
6601
6602                 device_tmp = device_names;
6603                 while ((device_name = strsep(&device_tmp, ","))) {
6604                         ret = init_binder_device(device_name);
6605                         if (ret)
6606                                 goto err_init_binder_device_failed;
6607                 }
6608         }
6609
6610         ret = init_binderfs();
6611         if (ret)
6612                 goto err_init_binder_device_failed;
6613
6614         return ret;
6615
6616 err_init_binder_device_failed:
6617         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6618                 misc_deregister(&device->miscdev);
6619                 hlist_del(&device->hlist);
6620                 kfree(device);
6621         }
6622
6623         kfree(device_names);
6624
6625 err_alloc_device_names_failed:
6626         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6627         binder_alloc_shrinker_exit();
6628
6629         return ret;
6630 }
6631
6632 device_initcall(binder_init);
6633
6634 #define CREATE_TRACE_POINTS
6635 #include "binder_trace.h"
6636
6637 MODULE_LICENSE("GPL v2");