/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which
 * lock is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
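
/*
 * Illustrative sketch (editorial, not called anywhere in the driver):
 * the lock helpers defined further below must follow the 1) outer,
 * 2) node, 3) inner order described above. A caller touching a ref,
 * its node, and the proc's todo list in one critical section would
 * nest the locks like this:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */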

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* SZ_1K and SZ_4M are not defined on all architectures */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
        param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)

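/*
 * Usage note (editorial, hedged): because debug_mask is exported via
 * module_param_named() above with S_IWUSR, it can typically be tuned
 * at runtime from userspace, e.g.:
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * The exact sysfs path assumes the module is named "binder"; adjust
 * if the module name differs.
 */
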
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset() below.
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}

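/*
 * Editorial note (hedged): the smp_wmb() in binder_transaction_log_add()
 * pairs with an smp_rmb() on the reader side (not shown in this excerpt).
 * A reader would sample debug_id_done, copy the entry, issue smp_rmb(),
 * and re-check debug_id_done to detect a torn (concurrently overwritten)
 * entry, along the lines of:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *	copy = *e;
 *	smp_rmb();
 *	valid = done && done == READ_ONCE(e->debug_id_done);
 */
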
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref_olocked(),
 *               a non-NULL @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES    = 0x01,
        BINDER_DEFERRED_FLUSH        = 0x02,
        BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int    code;
        unsigned int    flags;
        long    priority;
        long    saved_priority;
        kuid_t  sender_euid;
        /**
         * @lock:  protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

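/*
 * Illustrative sketch (editorial, not part of the driver): a typical
 * use of the helpers above, updating a node refcount field that is
 * protected by both node->lock and proc->inner_lock:
 *
 *	binder_node_inner_lock(node);
 *	node->local_strong_refs++;
 *	binder_node_inner_unlock(node);
 *
 * binder_node_inner_lock() handles the dead-node case (node->proc ==
 * NULL) by taking only node->lock.
 */
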
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
                           struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
                    struct binder_work *work,
                    struct list_head *target_list)
{
        binder_inner_proc_lock(proc);
        binder_enqueue_work_ilocked(work, target_list);
        binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, or NULL if the list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

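/*
 * Illustrative sketch (editorial): draining a worklist with the helpers
 * above. A consumer loop, assuming each binder_work is embedded in a
 * containing object, might look like:
 *
 *	struct binder_work *w;
 *
 *	while ((w = binder_dequeue_work_head(proc, &proc->todo))) {
 *		switch (w->type) {
 *		case BINDER_WORK_TRANSACTION_COMPLETE:
 *			...
 *		}
 *	}
 */
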
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;

        if (files == NULL)
                return -ESRCH;

        if (!lock_task_sighand(proc->tsk, &irqs))
                return -EMFILE;

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        if (proc->files)
                __fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        if (proc->files == NULL)
                return -ESRCH;

        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return !binder_worklist_empty_ilocked(&thread->todo) ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /*
         * Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

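/*
 * Illustrative sketch (editorial): binder_wakeup_proc_ilocked() above is
 * the canonical select-then-wake pattern. A caller that already holds
 * proc->inner_lock and wants a synchronous wake-up would follow the same
 * shape:
 *
 *	struct binder_thread *thread;
 *
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *
 * Once selected, the thread has been removed from waiting_threads, so
 * the caller must wake it (see binder_select_thread_ilocked() above).
 */
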
static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed, using %ld instead\n",
                      current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

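/*
 * Illustrative sketch (editorial): binder_get_node() returns the node
 * with a temporary reference held, so every successful lookup must be
 * paired with binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */
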
static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        binder_dequeue_work_ilocked(&node->work);
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                                !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node_nilocked() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}

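/*
 * Editorial note (hedged): the tmpref helpers above pick their lock by
 * node liveness. While node->proc is non-NULL, tmp_refs is covered by
 * proc->inner_lock; once the owning proc dies and node->proc becomes
 * NULL, binder_dead_nodes_lock covers it instead. Both paths also hold
 * node->lock, so a sketch of the invariant is:
 *
 *	node->lock held, plus
 *		node->proc ? node->proc->inner_lock
 *			   : binder_dead_nodes_lock
 */
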
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

1413 /**
1414  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1415  * @proc:       binder_proc that owns the ref
1416  * @node:       binder_node of target
1417  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1418  *
1419  * Look up the ref for the given node and return it if it exists
1420  *
1421  * If it doesn't exist and the caller provides a newly allocated
1422  * ref, initialize the fields of the newly allocated ref and insert
1423  * into the given proc rb_trees and node refs list.
1424  *
1425  * Return:      the ref for node. It is possible that another thread
1426  *              allocated/initialized the ref first in which case the
1427  *              returned ref would be different than the passed-in
1428  *              new_ref. new_ref must be kfree'd by the caller in
1429  *              this case.
1430  */
1431 static struct binder_ref *binder_get_ref_for_node_olocked(
1432                                         struct binder_proc *proc,
1433                                         struct binder_node *node,
1434                                         struct binder_ref *new_ref)
1435 {
1436         struct binder_context *context = proc->context;
1437         struct rb_node **p = &proc->refs_by_node.rb_node;
1438         struct rb_node *parent = NULL;
1439         struct binder_ref *ref;
1440         struct rb_node *n;
1441
1442         while (*p) {
1443                 parent = *p;
1444                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1445
1446                 if (node < ref->node)
1447                         p = &(*p)->rb_left;
1448                 else if (node > ref->node)
1449                         p = &(*p)->rb_right;
1450                 else
1451                         return ref;
1452         }
1453         if (!new_ref)
1454                 return NULL;
1455
1456         binder_stats_created(BINDER_STAT_REF);
1457         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1458         new_ref->proc = proc;
1459         new_ref->node = node;
1460         rb_link_node(&new_ref->rb_node_node, parent, p);
1461         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1462
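        /*
         * Assign the lowest unused descriptor: start at 0 for the
         * context manager node (1 otherwise) and walk refs_by_desc in
         * ascending desc order until a gap is found.
         */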
1463         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1464         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1465                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1466                 if (ref->data.desc > new_ref->data.desc)
1467                         break;
1468                 new_ref->data.desc = ref->data.desc + 1;
1469         }
1470
1471         p = &proc->refs_by_desc.rb_node;
1472         while (*p) {
1473                 parent = *p;
1474                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1475
1476                 if (new_ref->data.desc < ref->data.desc)
1477                         p = &(*p)->rb_left;
1478                 else if (new_ref->data.desc > ref->data.desc)
1479                         p = &(*p)->rb_right;
1480                 else
1481                         BUG();
1482         }
1483         rb_link_node(&new_ref->rb_node_desc, parent, p);
1484         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1485
1486         binder_node_lock(node);
1487         hlist_add_head(&new_ref->node_entry, &node->refs);
1488
1489         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1490                      "%d new ref %d desc %d for node %d\n",
1491                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1492                       node->debug_id);
1493         binder_node_unlock(node);
1494         return new_ref;
1495 }
1496
1497 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498 {
1499         bool delete_node = false;
1500
1501         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502                      "%d delete ref %d desc %d for node %d\n",
1503                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504                       ref->node->debug_id);
1505
1506         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508
1509         binder_node_inner_lock(ref->node);
1510         if (ref->data.strong)
1511                 binder_dec_node_nilocked(ref->node, 1, 1);
1512
1513         hlist_del(&ref->node_entry);
1514         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515         binder_node_inner_unlock(ref->node);
1516         /*
1517          * Clear ref->node unless we want the caller to free the node
1518          */
1519         if (!delete_node) {
1520                 /*
1521                  * The caller uses ref->node to determine
1522                  * whether the node needs to be freed. Clear
1523                  * it since the node is still alive.
1524                  */
1525                 ref->node = NULL;
1526         }
1527
1528         if (ref->death) {
1529                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530                              "%d delete ref %d desc %d has death notification\n",
1531                               ref->proc->pid, ref->data.debug_id,
1532                               ref->data.desc);
1533                 binder_dequeue_work(ref->proc, &ref->death->work);
1534                 binder_stats_deleted(BINDER_STAT_DEATH);
1535         }
1536         binder_stats_deleted(BINDER_STAT_REF);
1537 }
1538
1539 /**
1540  * binder_inc_ref_olocked() - increment the ref for given handle
1541  * @ref:         ref to be incremented
1542  * @strong:      if true, strong increment, else weak
1543  * @target_list: list to queue node work on
1544  *
1545  * Increment the ref. @ref->proc->outer_lock must be held on entry
1546  *
1547  * Return: 0 if successful, else errno
1548  */
1549 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1550                                   struct list_head *target_list)
1551 {
1552         int ret;
1553
1554         if (strong) {
1555                 if (ref->data.strong == 0) {
1556                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1557                         if (ret)
1558                                 return ret;
1559                 }
1560                 ref->data.strong++;
1561         } else {
1562                 if (ref->data.weak == 0) {
1563                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1564                         if (ret)
1565                                 return ret;
1566                 }
1567                 ref->data.weak++;
1568         }
1569         return 0;
1570 }
1571
1572 /**
1573  * binder_dec_ref_olocked() - dec the ref for given handle
1574  * @ref:        ref to be decremented
1575  * @strong:     if true, strong decrement, else weak
1576  *
1577  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1578  *
1579  * Return: true if ref is cleaned up and ready to be freed
1580  */
1581 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1582 {
1583         if (strong) {
1584                 if (ref->data.strong == 0) {
1585                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1586                                           ref->proc->pid, ref->data.debug_id,
1587                                           ref->data.desc, ref->data.strong,
1588                                           ref->data.weak);
1589                         return false;
1590                 }
1591                 ref->data.strong--;
1592                 if (ref->data.strong == 0)
1593                         binder_dec_node(ref->node, strong, 1);
1594         } else {
1595                 if (ref->data.weak == 0) {
1596                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1597                                           ref->proc->pid, ref->data.debug_id,
1598                                           ref->data.desc, ref->data.strong,
1599                                           ref->data.weak);
1600                         return false;
1601                 }
1602                 ref->data.weak--;
1603         }
1604         if (ref->data.strong == 0 && ref->data.weak == 0) {
1605                 binder_cleanup_ref_olocked(ref);
1606                 return true;
1607         }
1608         return false;
1609 }
1610
1611 /**
1612  * binder_get_node_from_ref() - get the node from the given proc/desc
1613  * @proc:       proc containing the ref
1614  * @desc:       the handle associated with the ref
1615  * @need_strong_ref: if true, only return node if ref is strong
1616  * @rdata:      the id/refcount data for the ref
1617  *
1618  * Given a proc and ref handle, return the associated binder_node
1619  *
1620  * Return: a binder_node, or NULL if not found or only weak when strong is required
1621  */
1622 static struct binder_node *binder_get_node_from_ref(
1623                 struct binder_proc *proc,
1624                 u32 desc, bool need_strong_ref,
1625                 struct binder_ref_data *rdata)
1626 {
1627         struct binder_node *node;
1628         struct binder_ref *ref;
1629
1630         binder_proc_lock(proc);
1631         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1632         if (!ref)
1633                 goto err_no_ref;
1634         node = ref->node;
1635         /*
1636          * Take an implicit reference on the node to ensure
1637          * it stays alive until the call to binder_put_node()
1638          */
1639         binder_inc_node_tmpref(node);
1640         if (rdata)
1641                 *rdata = ref->data;
1642         binder_proc_unlock(proc);
1643
1644         return node;
1645
1646 err_no_ref:
1647         binder_proc_unlock(proc);
1648         return NULL;
1649 }
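
/*
 * Illustrative sketch (not part of the driver): the lookup/put pattern
 * a caller of binder_get_node_from_ref() is expected to follow so the
 * tmpref taken above is always dropped:
 *
 *      struct binder_ref_data rdata;
 *      struct binder_node *node;
 *
 *      node = binder_get_node_from_ref(proc, desc, true, &rdata);
 *      if (node) {
 *              ... use node and rdata ...
 *              binder_put_node(node);
 *      }
 */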
1650
1651 /**
1652  * binder_free_ref() - free the binder_ref
1653  * @ref:        ref to free
1654  *
1655  * Free the binder_ref. Free the binder_node indicated by ref->node
1656  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1657  */
1658 static void binder_free_ref(struct binder_ref *ref)
1659 {
1660         if (ref->node)
1661                 binder_free_node(ref->node);
1662         kfree(ref->death);
1663         kfree(ref);
1664 }
1665
1666 /**
1667  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1668  * @proc:       proc containing the ref
1669  * @desc:       the handle associated with the ref
1670  * @increment:  true=inc reference, false=dec reference
1671  * @strong:     true=strong reference, false=weak reference
1672  * @rdata:      the id/refcount data for the ref
1673  *
1674  * Given a proc and ref handle, increment or decrement the ref
1675  * according to the "increment" arg.
1676  *
1677  * Return: 0 if successful, else errno
1678  */
1679 static int binder_update_ref_for_handle(struct binder_proc *proc,
1680                 uint32_t desc, bool increment, bool strong,
1681                 struct binder_ref_data *rdata)
1682 {
1683         int ret = 0;
1684         struct binder_ref *ref;
1685         bool delete_ref = false;
1686
1687         binder_proc_lock(proc);
1688         ref = binder_get_ref_olocked(proc, desc, strong);
1689         if (!ref) {
1690                 ret = -EINVAL;
1691                 goto err_no_ref;
1692         }
1693         if (increment)
1694                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1695         else
1696                 delete_ref = binder_dec_ref_olocked(ref, strong);
1697
1698         if (rdata)
1699                 *rdata = ref->data;
1700         binder_proc_unlock(proc);
1701
1702         if (delete_ref)
1703                 binder_free_ref(ref);
1704         return ret;
1705
1706 err_no_ref:
1707         binder_proc_unlock(proc);
1708         return ret;
1709 }
1710
1711 /**
1712  * binder_dec_ref_for_handle() - dec the ref for given handle
1713  * @proc:       proc containing the ref
1714  * @desc:       the handle associated with the ref
1715  * @strong:     true=strong reference, false=weak reference
1716  * @rdata:      the id/refcount data for the ref
1717  *
1718  * Just calls binder_update_ref_for_handle() to decrement the ref.
1719  *
1720  * Return: 0 if successful, else errno
1721  */
1722 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1723                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1724 {
1725         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1726 }
1727
1729 /**
1730  * binder_inc_ref_for_node() - increment the ref for given proc/node
1731  * @proc:        proc containing the ref
1732  * @node:        target node
1733  * @strong:      true=strong reference, false=weak reference
1734  * @target_list: worklist to use if node is incremented
1735  * @rdata:       the id/refcount data for the ref
1736  *
1737  * Given a proc and node, increment the ref. Create the ref if it
1738  * doesn't already exist
1739  *
1740  * Return: 0 if successful, else errno
1741  */
1742 static int binder_inc_ref_for_node(struct binder_proc *proc,
1743                         struct binder_node *node,
1744                         bool strong,
1745                         struct list_head *target_list,
1746                         struct binder_ref_data *rdata)
1747 {
1748         struct binder_ref *ref;
1749         struct binder_ref *new_ref = NULL;
1750         int ret = 0;
1751
1752         binder_proc_lock(proc);
1753         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1754         if (!ref) {
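                /*
                 * No ref yet: proc->outer_lock is a spinlock, so drop
                 * it across the sleeping GFP_KERNEL allocation, then
                 * retake it and look up again in case another thread
                 * created the ref in the meantime.
                 */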
1755                 binder_proc_unlock(proc);
1756                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1757                 if (!new_ref)
1758                         return -ENOMEM;
1759                 binder_proc_lock(proc);
1760                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1761         }
1762         ret = binder_inc_ref_olocked(ref, strong, target_list);
1763         *rdata = ref->data;
1764         binder_proc_unlock(proc);
1765         if (new_ref && ref != new_ref)
1766                 /*
1767                  * Another thread created the ref first so
1768                  * free the one we allocated
1769                  */
1770                 kfree(new_ref);
1771         return ret;
1772 }
1773
1774 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1775                                            struct binder_transaction *t)
1776 {
1777         BUG_ON(!target_thread);
1778         assert_spin_locked(&target_thread->proc->inner_lock);
1779         BUG_ON(target_thread->transaction_stack != t);
1780         BUG_ON(target_thread->transaction_stack->from != target_thread);
1781         target_thread->transaction_stack =
1782                 target_thread->transaction_stack->from_parent;
1783         t->from = NULL;
1784 }
1785
1786 /**
1787  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1788  * @thread:     thread to decrement
1789  *
1790  * A thread needs to be kept alive while being used to create or
1791  * handle a transaction. binder_get_txn_from() is used to safely
1792  * extract t->from from a binder_transaction and keep the thread
1793  * indicated by t->from from being freed. When done with that
1794  * binder_thread, this function is called to decrement the
1795  * tmp_ref and free if appropriate (thread has been released
1796  * and no transaction is being processed by the driver)
1797  */
1798 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1799 {
1800         /*
1801          * an atomic is used to protect the counter value while
1802          * it cannot reach zero or thread->is_dead is false
1803          */
1804         binder_inner_proc_lock(thread->proc);
1805         atomic_dec(&thread->tmp_ref);
1806         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1807                 binder_inner_proc_unlock(thread->proc);
1808                 binder_free_thread(thread);
1809                 return;
1810         }
1811         binder_inner_proc_unlock(thread->proc);
1812 }
1813
1814 /**
1815  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1816  * @proc:       proc to decrement
1817  *
1818  * A binder_proc needs to be kept alive while being used to create or
1819  * handle a transaction. proc->tmp_ref is incremented when
1820  * creating a new transaction or the binder_proc is currently in-use
1821  * by threads that are being released. When done with the binder_proc,
1822  * this function is called to decrement the counter and free the
1823  * proc if appropriate (proc has been released, all threads have
1824  * been released and not currently in-use to process a transaction).
1825  */
1826 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1827 {
1828         binder_inner_proc_lock(proc);
1829         proc->tmp_ref--;
1830         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1831                         !proc->tmp_ref) {
1832                 binder_inner_proc_unlock(proc);
1833                 binder_free_proc(proc);
1834                 return;
1835         }
1836         binder_inner_proc_unlock(proc);
1837 }
1838
1839 /**
1840  * binder_get_txn_from() - safely extract the "from" thread in transaction
1841  * @t:  binder transaction for t->from
1842  *
1843  * Atomically return the "from" thread and increment the tmp_ref
1844  * count for the thread to ensure it stays alive until
1845  * binder_thread_dec_tmpref() is called.
1846  *
1847  * Return: the value of t->from
1848  */
1849 static struct binder_thread *binder_get_txn_from(
1850                 struct binder_transaction *t)
1851 {
1852         struct binder_thread *from;
1853
1854         spin_lock(&t->lock);
1855         from = t->from;
1856         if (from)
1857                 atomic_inc(&from->tmp_ref);
1858         spin_unlock(&t->lock);
1859         return from;
1860 }
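
/*
 * Illustrative sketch (not part of the driver): a caller of
 * binder_get_txn_from() must balance the tmp_ref it takes:
 *
 *      struct binder_thread *from = binder_get_txn_from(t);
 *
 *      if (from) {
 *              ... use from ...
 *              binder_thread_dec_tmpref(from);
 *      }
 */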
1861
1862 /**
1863  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1864  * @t:  binder transaction for t->from
1865  *
1866  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1867  * to guarantee that the thread cannot be released while operating on it.
1868  * The caller must call binder_inner_proc_unlock() to release the inner lock
1869  * as well as call binder_thread_dec_tmpref() to release the reference.
1870  *
1871  * Return: the value of t->from
1872  */
1873 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1874                 struct binder_transaction *t)
1875 {
1876         struct binder_thread *from;
1877
1878         from = binder_get_txn_from(t);
1879         if (!from)
1880                 return NULL;
1881         binder_inner_proc_lock(from->proc);
1882         if (t->from) {
1883                 BUG_ON(from != t->from);
1884                 return from;
1885         }
1886         binder_inner_proc_unlock(from->proc);
1887         binder_thread_dec_tmpref(from);
1888         return NULL;
1889 }
1890
1891 static void binder_free_transaction(struct binder_transaction *t)
1892 {
1893         if (t->buffer)
1894                 t->buffer->transaction = NULL;
1895         kfree(t);
1896         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1897 }
1898
1899 static void binder_send_failed_reply(struct binder_transaction *t,
1900                                      uint32_t error_code)
1901 {
1902         struct binder_thread *target_thread;
1903         struct binder_transaction *next;
1904
1905         BUG_ON(t->flags & TF_ONE_WAY);
1906         while (1) {
1907                 target_thread = binder_get_txn_from_and_acq_inner(t);
1908                 if (target_thread) {
1909                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1910                                      "send failed reply for transaction %d to %d:%d\n",
1911                                       t->debug_id,
1912                                       target_thread->proc->pid,
1913                                       target_thread->pid);
1914
1915                         binder_pop_transaction_ilocked(target_thread, t);
1916                         if (target_thread->reply_error.cmd == BR_OK) {
1917                                 target_thread->reply_error.cmd = error_code;
1918                                 binder_enqueue_work_ilocked(
1919                                         &target_thread->reply_error.work,
1920                                         &target_thread->todo);
1921                                 wake_up_interruptible(&target_thread->wait);
1922                         } else {
1923                                 WARN(1, "Unexpected reply error: %u\n",
1924                                                 target_thread->reply_error.cmd);
1925                         }
1926                         binder_inner_proc_unlock(target_thread->proc);
1927                         binder_thread_dec_tmpref(target_thread);
1928                         binder_free_transaction(t);
1929                         return;
1930                 }
1931                 next = t->from_parent;
1932
1933                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1934                              "send failed reply for transaction %d, target dead\n",
1935                              t->debug_id);
1936
1937                 binder_free_transaction(t);
1938                 if (next == NULL) {
1939                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1940                                      "reply failed, no target thread at root\n");
1941                         return;
1942                 }
1943                 t = next;
1944                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1945                              "reply failed, no target thread -- retry %d\n",
1946                               t->debug_id);
1947         }
1948 }
1949
1950 /**
1951  * binder_cleanup_transaction() - cleans up undelivered transaction
1952  * @t:          transaction that needs to be cleaned up
1953  * @reason:     reason the transaction wasn't delivered
1954  * @error_code: error to return to caller (if synchronous call)
1955  */
1956 static void binder_cleanup_transaction(struct binder_transaction *t,
1957                                        const char *reason,
1958                                        uint32_t error_code)
1959 {
1960         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1961                 binder_send_failed_reply(t, error_code);
1962         } else {
1963                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1964                         "undelivered transaction %d, %s\n",
1965                         t->debug_id, reason);
1966                 binder_free_transaction(t);
1967         }
1968 }
1969
1970 /**
1971  * binder_validate_object() - checks for a valid metadata object in a buffer.
1972  * @buffer:     binder_buffer that we're parsing.
1973  * @offset:     offset in the buffer at which to validate an object.
1974  *
1975  * Return:      If there's a valid metadata object at @offset in @buffer, the
1976  *              size of that object. Otherwise, it returns zero.
1977  */
1978 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1979 {
1980         /* Check if we can read a header first */
1981         struct binder_object_header *hdr;
1982         size_t object_size = 0;
1983
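        /*
         * If data_size < sizeof(*hdr), the unsigned subtraction in the
         * first test wraps around, but the second test then rejects
         * the buffer, so undersized buffers still return 0.
         */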
1984         if (offset > buffer->data_size - sizeof(*hdr) ||
1985             buffer->data_size < sizeof(*hdr) ||
1986             !IS_ALIGNED(offset, sizeof(u32)))
1987                 return 0;
1988
1989         /* Ok, now see if we can read a complete object. */
1990         hdr = (struct binder_object_header *)(buffer->data + offset);
1991         switch (hdr->type) {
1992         case BINDER_TYPE_BINDER:
1993         case BINDER_TYPE_WEAK_BINDER:
1994         case BINDER_TYPE_HANDLE:
1995         case BINDER_TYPE_WEAK_HANDLE:
1996                 object_size = sizeof(struct flat_binder_object);
1997                 break;
1998         case BINDER_TYPE_FD:
1999                 object_size = sizeof(struct binder_fd_object);
2000                 break;
2001         case BINDER_TYPE_PTR:
2002                 object_size = sizeof(struct binder_buffer_object);
2003                 break;
2004         case BINDER_TYPE_FDA:
2005                 object_size = sizeof(struct binder_fd_array_object);
2006                 break;
2007         default:
2008                 return 0;
2009         }
2010         if (offset <= buffer->data_size - object_size &&
2011             buffer->data_size >= object_size)
2012                 return object_size;
2013         else
2014                 return 0;
2015 }
2016
2017 /**
2018  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2019  * @b:          binder_buffer containing the object
2020  * @index:      index in offset array at which the binder_buffer_object is
2021  *              located
2022  * @start:      points to the start of the offset array
2023  * @num_valid:  the number of valid offsets in the offset array
2024  *
2025  * Return:      If @index is within the valid range of the offset array
2026  *              described by @start and @num_valid, and if there's a valid
2027  *              binder_buffer_object at the offset found in index @index
2028  *              of the offset array, that object is returned. Otherwise,
2029  *              %NULL is returned.
2030  *              Note that the offset found in index @index itself is not
2031  *              verified; this function assumes that @num_valid elements
2032  *              from @start were previously verified to have valid offsets.
2033  */
2034 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2035                                                         binder_size_t index,
2036                                                         binder_size_t *start,
2037                                                         binder_size_t num_valid)
2038 {
2039         struct binder_buffer_object *buffer_obj;
2040         binder_size_t *offp;
2041
2042         if (index >= num_valid)
2043                 return NULL;
2044
2045         offp = start + index;
2046         buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2047         if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2048                 return NULL;
2049
2050         return buffer_obj;
2051 }
2052
2053 /**
2054  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2055  * @b:                  transaction buffer
2056  * @objects_start:      start of objects buffer
2057  * @buffer:             binder_buffer_object in which to fix up
2058  * @offset:             start offset in @buffer to fix up
2059  * @last_obj:           last binder_buffer_object that we fixed up in
2060  * @last_min_offset:    minimum fixup offset in @last_obj
2061  *
2062  * Return:              %true if a fixup in buffer @buffer at offset @offset is
2063  *                      allowed.
2064  *
2065  * For safety reasons, we only allow fixups inside a buffer to happen
2066  * at increasing offsets; additionally, we only allow fixup on the last
2067  * buffer object that was verified, or one of its parents.
2068  *
2069  * Example of what is allowed:
2070  *
2071  * A
2072  *   B (parent = A, offset = 0)
2073  *   C (parent = A, offset = 16)
2074  *     D (parent = C, offset = 0)
2075  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2076  *
2077  * Examples of what is not allowed:
2078  *
2079  * Decreasing offsets within the same parent:
2080  * A
2081  *   C (parent = A, offset = 16)
2082  *   B (parent = A, offset = 0) // decreasing offset within A
2083  *
2084  * Referring to a parent that wasn't the last object or any of its parents:
2085  * A
2086  *   B (parent = A, offset = 0)
2087  *   C (parent = A, offset = 16)
2088  *     D (parent = B, offset = 0) // B is not the last object (C)
2089  *                                // or any of C's parents
2090  */
2091 static bool binder_validate_fixup(struct binder_buffer *b,
2092                                   binder_size_t *objects_start,
2093                                   struct binder_buffer_object *buffer,
2094                                   binder_size_t fixup_offset,
2095                                   struct binder_buffer_object *last_obj,
2096                                   binder_size_t last_min_offset)
2097 {
2098         if (!last_obj) {
2099                 /* No previously verified object to fix up in */
2100                 return false;
2101         }
2102
2103         while (last_obj != buffer) {
2104                 /*
2105                  * Safe to retrieve the parent of last_obj, since it
2106                  * was already previously verified by the driver.
2107                  */
2108                 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2109                         return false;
2110                 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2111                 last_obj = (struct binder_buffer_object *)
2112                         (b->data + *(objects_start + last_obj->parent));
2113         }
2114         return (fixup_offset >= last_min_offset);
2115 }
2116
2117 static void binder_transaction_buffer_release(struct binder_proc *proc,
2118                                               struct binder_buffer *buffer,
2119                                               binder_size_t *failed_at)
2120 {
2121         binder_size_t *offp, *off_start, *off_end;
2122         int debug_id = buffer->debug_id;
2123
2124         binder_debug(BINDER_DEBUG_TRANSACTION,
2125                      "%d buffer release %d, size %zd-%zd, failed at %p\n",
2126                      proc->pid, buffer->debug_id,
2127                      buffer->data_size, buffer->offsets_size, failed_at);
2128
2129         if (buffer->target_node)
2130                 binder_dec_node(buffer->target_node, 1, 0);
2131
2132         off_start = (binder_size_t *)(buffer->data +
2133                                       ALIGN(buffer->data_size, sizeof(void *)));
2134         if (failed_at)
2135                 off_end = failed_at;
2136         else
2137                 off_end = (void *)off_start + buffer->offsets_size;
2138         for (offp = off_start; offp < off_end; offp++) {
2139                 struct binder_object_header *hdr;
2140                 size_t object_size = binder_validate_object(buffer, *offp);
2141
2142                 if (object_size == 0) {
2143                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2144                                debug_id, (u64)*offp, buffer->data_size);
2145                         continue;
2146                 }
2147                 hdr = (struct binder_object_header *)(buffer->data + *offp);
2148                 switch (hdr->type) {
2149                 case BINDER_TYPE_BINDER:
2150                 case BINDER_TYPE_WEAK_BINDER: {
2151                         struct flat_binder_object *fp;
2152                         struct binder_node *node;
2153
2154                         fp = to_flat_binder_object(hdr);
2155                         node = binder_get_node(proc, fp->binder);
2156                         if (node == NULL) {
2157                                 pr_err("transaction release %d bad node %016llx\n",
2158                                        debug_id, (u64)fp->binder);
2159                                 break;
2160                         }
2161                         binder_debug(BINDER_DEBUG_TRANSACTION,
2162                                      "        node %d u%016llx\n",
2163                                      node->debug_id, (u64)node->ptr);
2164                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2165                                         0);
2166                         binder_put_node(node);
2167                 } break;
2168                 case BINDER_TYPE_HANDLE:
2169                 case BINDER_TYPE_WEAK_HANDLE: {
2170                         struct flat_binder_object *fp;
2171                         struct binder_ref_data rdata;
2172                         int ret;
2173
2174                         fp = to_flat_binder_object(hdr);
2175                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2176                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2177
2178                         if (ret) {
2179                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2180                                  debug_id, fp->handle, ret);
2181                                 break;
2182                         }
2183                         binder_debug(BINDER_DEBUG_TRANSACTION,
2184                                      "        ref %d desc %d\n",
2185                                      rdata.debug_id, rdata.desc);
2186                 } break;
2187
2188                 case BINDER_TYPE_FD: {
2189                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2190
2191                         binder_debug(BINDER_DEBUG_TRANSACTION,
2192                                      "        fd %d\n", fp->fd);
2193                         if (failed_at)
2194                                 task_close_fd(proc, fp->fd);
2195                 } break;
2196                 case BINDER_TYPE_PTR:
2197                         /*
2198                          * Nothing to do here, this will get cleaned up when the
2199                          * transaction buffer gets freed
2200                          */
2201                         break;
2202                 case BINDER_TYPE_FDA: {
2203                         struct binder_fd_array_object *fda;
2204                         struct binder_buffer_object *parent;
2205                         uintptr_t parent_buffer;
2206                         u32 *fd_array;
2207                         size_t fd_index;
2208                         binder_size_t fd_buf_size;
2209
2210                         fda = to_binder_fd_array_object(hdr);
2211                         parent = binder_validate_ptr(buffer, fda->parent,
2212                                                      off_start,
2213                                                      offp - off_start);
2214                         if (!parent) {
2215                                 pr_err("transaction release %d bad parent offset\n",
2216                                        debug_id);
2217                                 continue;
2218                         }
2219                         /*
2220                          * Since the parent was already fixed up, convert it
2221                          * back to kernel address space to access it
2222                          */
2223                         parent_buffer = parent->buffer -
2224                                 binder_alloc_get_user_buffer_offset(
2225                                                 &proc->alloc);
2226
2227                         fd_buf_size = sizeof(u32) * fda->num_fds;
2228                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2229                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2230                                        debug_id, (u64)fda->num_fds);
2231                                 continue;
2232                         }
2233                         if (fd_buf_size > parent->length ||
2234                             fda->parent_offset > parent->length - fd_buf_size) {
2235                                 /* No space for all file descriptors here. */
2236                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2237                                        debug_id, (u64)fda->num_fds);
2238                                 continue;
2239                         }
2240                         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2241                         for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2242                                 task_close_fd(proc, fd_array[fd_index]);
2243                 } break;
2244                 default:
2245                         pr_err("transaction release %d bad object type %x\n",
2246                                 debug_id, hdr->type);
2247                         break;
2248                 }
2249         }
2250 }
2251
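/*
 * Translate a local binder object in the sender's transaction buffer
 * into a handle valid in the target process: look up (or create) the
 * node for fp->binder, take a (strong or weak) ref on it in
 * target_proc, and rewrite @fp in place as a handle.
 */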
2252 static int binder_translate_binder(struct flat_binder_object *fp,
2253                                    struct binder_transaction *t,
2254                                    struct binder_thread *thread)
2255 {
2256         struct binder_node *node;
2257         struct binder_proc *proc = thread->proc;
2258         struct binder_proc *target_proc = t->to_proc;
2259         struct binder_ref_data rdata;
2260         int ret = 0;
2261
2262         node = binder_get_node(proc, fp->binder);
2263         if (!node) {
2264                 node = binder_new_node(proc, fp);
2265                 if (!node)
2266                         return -ENOMEM;
2267         }
2268         if (fp->cookie != node->cookie) {
2269                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2270                                   proc->pid, thread->pid, (u64)fp->binder,
2271                                   node->debug_id, (u64)fp->cookie,
2272                                   (u64)node->cookie);
2273                 ret = -EINVAL;
2274                 goto done;
2275         }
2276         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2277                 ret = -EPERM;
2278                 goto done;
2279         }
2280
2281         ret = binder_inc_ref_for_node(target_proc, node,
2282                         fp->hdr.type == BINDER_TYPE_BINDER,
2283                         &thread->todo, &rdata);
2284         if (ret)
2285                 goto done;
2286
2287         if (fp->hdr.type == BINDER_TYPE_BINDER)
2288                 fp->hdr.type = BINDER_TYPE_HANDLE;
2289         else
2290                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2291         fp->binder = 0;
2292         fp->handle = rdata.desc;
2293         fp->cookie = 0;
2294
2295         trace_binder_transaction_node_to_ref(t, node, &rdata);
2296         binder_debug(BINDER_DEBUG_TRANSACTION,
2297                      "        node %d u%016llx -> ref %d desc %d\n",
2298                      node->debug_id, (u64)node->ptr,
2299                      rdata.debug_id, rdata.desc);
2300 done:
2301         binder_put_node(node);
2302         return ret;
2303 }
2304
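/*
 * Translate a handle in the sender's transaction buffer for the target
 * process: if the node lives in target_proc, rewrite @fp in place as a
 * local binder object; otherwise take a ref in target_proc and rewrite
 * @fp with the target's own descriptor.
 */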
2305 static int binder_translate_handle(struct flat_binder_object *fp,
2306                                    struct binder_transaction *t,
2307                                    struct binder_thread *thread)
2308 {
2309         struct binder_proc *proc = thread->proc;
2310         struct binder_proc *target_proc = t->to_proc;
2311         struct binder_node *node;
2312         struct binder_ref_data src_rdata;
2313         int ret = 0;
2314
2315         node = binder_get_node_from_ref(proc, fp->handle,
2316                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2317         if (!node) {
2318                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2319                                   proc->pid, thread->pid, fp->handle);
2320                 return -EINVAL;
2321         }
2322         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2323                 ret = -EPERM;
2324                 goto done;
2325         }
2326
2327         binder_node_lock(node);
2328         if (node->proc == target_proc) {
2329                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2330                         fp->hdr.type = BINDER_TYPE_BINDER;
2331                 else
2332                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2333                 fp->binder = node->ptr;
2334                 fp->cookie = node->cookie;
2335                 if (node->proc)
2336                         binder_inner_proc_lock(node->proc);
2337                 binder_inc_node_nilocked(node,
2338                                          fp->hdr.type == BINDER_TYPE_BINDER,
2339                                          0, NULL);
2340                 if (node->proc)
2341                         binder_inner_proc_unlock(node->proc);
2342                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2343                 binder_debug(BINDER_DEBUG_TRANSACTION,
2344                              "        ref %d desc %d -> node %d u%016llx\n",
2345                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2346                              (u64)node->ptr);
2347                 binder_node_unlock(node);
2348         } else {
2349                 struct binder_ref_data dest_rdata;
2350
2351                 binder_node_unlock(node);
2352                 ret = binder_inc_ref_for_node(target_proc, node,
2353                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2354                                 NULL, &dest_rdata);
2355                 if (ret)
2356                         goto done;
2357
2358                 fp->binder = 0;
2359                 fp->handle = dest_rdata.desc;
2360                 fp->cookie = 0;
2361                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2362                                                     &dest_rdata);
2363                 binder_debug(BINDER_DEBUG_TRANSACTION,
2364                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2365                              src_rdata.debug_id, src_rdata.desc,
2366                              dest_rdata.debug_id, dest_rdata.desc,
2367                              node->debug_id);
2368         }
2369 done:
2370         binder_put_node(node);
2371         return ret;
2372 }
2373
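/*
 * Install a copy of @fd from the sender into the target process,
 * subject to the target accepting fds and a security check. Returns
 * the fd number in the target process, or a negative errno.
 */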
2374 static int binder_translate_fd(int fd,
2375                                struct binder_transaction *t,
2376                                struct binder_thread *thread,
2377                                struct binder_transaction *in_reply_to)
2378 {
2379         struct binder_proc *proc = thread->proc;
2380         struct binder_proc *target_proc = t->to_proc;
2381         int target_fd;
2382         struct file *file;
2383         int ret;
2384         bool target_allows_fd;
2385
2386         if (in_reply_to)
2387                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2388         else
2389                 target_allows_fd = t->buffer->target_node->accept_fds;
2390         if (!target_allows_fd) {
2391                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2392                                   proc->pid, thread->pid,
2393                                   in_reply_to ? "reply" : "transaction",
2394                                   fd);
2395                 ret = -EPERM;
2396                 goto err_fd_not_accepted;
2397         }
2398
2399         file = fget(fd);
2400         if (!file) {
2401                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2402                                   proc->pid, thread->pid, fd);
2403                 ret = -EBADF;
2404                 goto err_fget;
2405         }
2406         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2407         if (ret < 0) {
2408                 ret = -EPERM;
2409                 goto err_security;
2410         }
2411
2412         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2413         if (target_fd < 0) {
2414                 ret = -ENOMEM;
2415                 goto err_get_unused_fd;
2416         }
2417         task_fd_install(target_proc, target_fd, file);
2418         trace_binder_transaction_fd(t, fd, target_fd);
2419         binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
2420                      fd, target_fd);
2421
2422         return target_fd;
2423
2424 err_get_unused_fd:
2425 err_security:
2426         fput(file);
2427 err_fget:
2428 err_fd_not_accepted:
2429         return ret;
2430 }
2431
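/*
 * Translate every fd in a BINDER_TYPE_FDA object: validate the fd
 * array's bounds inside its parent buffer, then replace each sender fd
 * in place with one installed in the target process, unwinding the
 * fds installed so far on failure.
 */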
2432 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2433                                      struct binder_buffer_object *parent,
2434                                      struct binder_transaction *t,
2435                                      struct binder_thread *thread,
2436                                      struct binder_transaction *in_reply_to)
2437 {
2438         binder_size_t fdi, fd_buf_size, num_installed_fds;
2439         int target_fd;
2440         uintptr_t parent_buffer;
2441         u32 *fd_array;
2442         struct binder_proc *proc = thread->proc;
2443         struct binder_proc *target_proc = t->to_proc;
2444
2445         fd_buf_size = sizeof(u32) * fda->num_fds;
2446         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2447                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2448                                   proc->pid, thread->pid, (u64)fda->num_fds);
2449                 return -EINVAL;
2450         }
2451         if (fd_buf_size > parent->length ||
2452             fda->parent_offset > parent->length - fd_buf_size) {
2453                 /* No space for all file descriptors here. */
2454                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2455                                   proc->pid, thread->pid, (u64)fda->num_fds);
2456                 return -EINVAL;
2457         }
2458         /*
2459          * Since the parent was already fixed up, convert it
2460          * back to the kernel address space to access it
2461          */
2462         parent_buffer = parent->buffer -
2463                 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2464         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2465         if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2466                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2467                                   proc->pid, thread->pid);
2468                 return -EINVAL;
2469         }
2470         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2471                 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2472                                                 in_reply_to);
2473                 if (target_fd < 0)
2474                         goto err_translate_fd_failed;
2475                 fd_array[fdi] = target_fd;
2476         }
2477         return 0;
2478
2479 err_translate_fd_failed:
2480         /*
2481          * Failed to allocate fd or security error, free fds
2482          * installed so far.
2483          */
2484         num_installed_fds = fdi;
2485         for (fdi = 0; fdi < num_installed_fds; fdi++)
2486                 task_close_fd(target_proc, fd_array[fdi]);
2487         return target_fd;
2488 }
2489
2490 static int binder_fixup_parent(struct binder_transaction *t,
2491                                struct binder_thread *thread,
2492                                struct binder_buffer_object *bp,
2493                                binder_size_t *off_start,
2494                                binder_size_t num_valid,
2495                                struct binder_buffer_object *last_fixup_obj,
2496                                binder_size_t last_fixup_min_off)
2497 {
2498         struct binder_buffer_object *parent;
2499         u8 *parent_buffer;
2500         struct binder_buffer *b = t->buffer;
2501         struct binder_proc *proc = thread->proc;
2502         struct binder_proc *target_proc = t->to_proc;
2503
2504         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2505                 return 0;
2506
2507         parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2508         if (!parent) {
2509                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2510                                   proc->pid, thread->pid);
2511                 return -EINVAL;
2512         }
2513
2514         if (!binder_validate_fixup(b, off_start,
2515                                    parent, bp->parent_offset,
2516                                    last_fixup_obj,
2517                                    last_fixup_min_off)) {
2518                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2519                                   proc->pid, thread->pid);
2520                 return -EINVAL;
2521         }
2522
2523         if (parent->length < sizeof(binder_uintptr_t) ||
2524             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2525                 /* No space for a pointer here! */
2526                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2527                                   proc->pid, thread->pid);
2528                 return -EINVAL;
2529         }
2530         parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2531                         binder_alloc_get_user_buffer_offset(
2532                                 &target_proc->alloc));
2533         *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2534
2535         return 0;
2536 }
2537
2538 /**
2539  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2540  * @t:          transaction to send
2541  * @proc:       process to send the transaction to
2542  * @thread:     thread in @proc to send the transaction to (may be NULL)
2543  *
2544  * This function queues a transaction to the specified process. It will try
2545  * to find a thread in the target process to handle the transaction and
2546  * wake it up. If no thread is found, the work is queued to the proc
2547  * waitqueue.
2548  *
2549  * If the @thread parameter is not NULL, the transaction is always queued
2550  * to the todo list of that specific thread.
2551  *
2552  * Return:      true if the transaction was successfully queued
2553  *              false if the target process or thread is dead
2554  */
2555 static bool binder_proc_transaction(struct binder_transaction *t,
2556                                     struct binder_proc *proc,
2557                                     struct binder_thread *thread)
2558 {
2559         struct list_head *target_list = NULL;
2560         struct binder_node *node = t->buffer->target_node;
2561         bool oneway = !!(t->flags & TF_ONE_WAY);
2562         bool wakeup = true;
2563
2564         BUG_ON(!node);
2565         binder_node_lock(node);
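        /*
         * Oneway transactions to a node are serialized: if one is
         * already pending on the node, park this one on
         * node->async_todo and do not wake the target yet.
         */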
2566         if (oneway) {
2567                 BUG_ON(thread);
2568                 if (node->has_async_transaction) {
2569                         target_list = &node->async_todo;
2570                         wakeup = false;
2571                 } else {
2572                         node->has_async_transaction = 1;
2573                 }
2574         }
2575
2576         binder_inner_proc_lock(proc);
2577
2578         if (proc->is_dead || (thread && thread->is_dead)) {
2579                 binder_inner_proc_unlock(proc);
2580                 binder_node_unlock(node);
2581                 return false;
2582         }
2583
2584         if (!thread && !target_list)
2585                 thread = binder_select_thread_ilocked(proc);
2586
2587         if (thread)
2588                 target_list = &thread->todo;
2589         else if (!target_list)
2590                 target_list = &proc->todo;
2591         else
2592                 BUG_ON(target_list != &node->async_todo);
2593
2594         binder_enqueue_work_ilocked(&t->work, target_list);
2595
2596         if (wakeup)
2597                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2598
2599         binder_inner_proc_unlock(proc);
2600         binder_node_unlock(node);
2601
2602         return true;
2603 }
2604
2605 /**
2606  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2607  * @node:         struct binder_node for which to get refs
2608  * @procp:        returns @node->proc if valid
2609  * @error:        set to BR_DEAD_REPLY if there is no @node->proc
2610  *
2611  * User-space normally keeps the node alive when creating a transaction
2612  * since it has a reference to the target. The local strong ref keeps it
2613  * alive if the sending process dies before the target process processes
2614  * the transaction. If the source process is malicious or has a reference
2615  * counting bug, relying on the local strong ref can fail.
2616  *
2617  * Since user-space can cause the local strong ref to go away, we also take
2618  * a tmpref on the node to ensure it survives while we are constructing
2619  * the transaction. We also need a tmpref on the proc while we are
2620  * constructing the transaction, so we take that here as well.
2621  *
2622  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2623  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2624  * target proc has died, @error is set to BR_DEAD_REPLY.
2625  */
2626 static struct binder_node *binder_get_node_refs_for_txn(
2627                 struct binder_node *node,
2628                 struct binder_proc **procp,
2629                 uint32_t *error)
2630 {
2631         struct binder_node *target_node = NULL;
2632
2633         binder_node_inner_lock(node);
2634         if (node->proc) {
2635                 target_node = node;
2636                 binder_inc_node_nilocked(node, 1, 0, NULL);
2637                 binder_inc_node_tmpref_ilocked(node);
2638                 node->proc->tmp_ref++;
2639                 *procp = node->proc;
2640         } else
2641                 *error = BR_DEAD_REPLY;
2642         binder_node_inner_unlock(node);
2643
2644         return target_node;
2645 }
2646
2647 static void binder_transaction(struct binder_proc *proc,
2648                                struct binder_thread *thread,
2649                                struct binder_transaction_data *tr, int reply,
2650                                binder_size_t extra_buffers_size)
2651 {
2652         int ret;
2653         struct binder_transaction *t;
2654         struct binder_work *tcomplete;
2655         binder_size_t *offp, *off_end, *off_start;
2656         binder_size_t off_min;
2657         u8 *sg_bufp, *sg_buf_end;
2658         struct binder_proc *target_proc = NULL;
2659         struct binder_thread *target_thread = NULL;
2660         struct binder_node *target_node = NULL;
2661         struct binder_transaction *in_reply_to = NULL;
2662         struct binder_transaction_log_entry *e;
2663         uint32_t return_error = 0;
2664         uint32_t return_error_param = 0;
2665         uint32_t return_error_line = 0;
2666         struct binder_buffer_object *last_fixup_obj = NULL;
2667         binder_size_t last_fixup_min_off = 0;
2668         struct binder_context *context = proc->context;
2669         int t_debug_id = atomic_inc_return(&binder_last_id);
2670
2671         e = binder_transaction_log_add(&binder_transaction_log);
2672         e->debug_id = t_debug_id;
2673         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2674         e->from_proc = proc->pid;
2675         e->from_thread = thread->pid;
2676         e->target_handle = tr->target.handle;
2677         e->data_size = tr->data_size;
2678         e->offsets_size = tr->offsets_size;
2679         e->context_name = proc->context->name;
2680
2681         if (reply) {
2682                 binder_inner_proc_lock(proc);
2683                 in_reply_to = thread->transaction_stack;
2684                 if (in_reply_to == NULL) {
2685                         binder_inner_proc_unlock(proc);
2686                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2687                                           proc->pid, thread->pid);
2688                         return_error = BR_FAILED_REPLY;
2689                         return_error_param = -EPROTO;
2690                         return_error_line = __LINE__;
2691                         goto err_empty_call_stack;
2692                 }
2693                 if (in_reply_to->to_thread != thread) {
2694                         spin_lock(&in_reply_to->lock);
2695                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2696                                 proc->pid, thread->pid, in_reply_to->debug_id,
2697                                 in_reply_to->to_proc ?
2698                                 in_reply_to->to_proc->pid : 0,
2699                                 in_reply_to->to_thread ?
2700                                 in_reply_to->to_thread->pid : 0);
2701                         spin_unlock(&in_reply_to->lock);
2702                         binder_inner_proc_unlock(proc);
2703                         return_error = BR_FAILED_REPLY;
2704                         return_error_param = -EPROTO;
2705                         return_error_line = __LINE__;
2706                         in_reply_to = NULL;
2707                         goto err_bad_call_stack;
2708                 }
2709                 thread->transaction_stack = in_reply_to->to_parent;
2710                 binder_inner_proc_unlock(proc);
2711                 binder_set_nice(in_reply_to->saved_priority);
2712                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2713                 if (target_thread == NULL) {
2714                         return_error = BR_DEAD_REPLY;
2715                         return_error_line = __LINE__;
2716                         goto err_dead_binder;
2717                 }
2718                 if (target_thread->transaction_stack != in_reply_to) {
2719                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2720                                 proc->pid, thread->pid,
2721                                 target_thread->transaction_stack ?
2722                                 target_thread->transaction_stack->debug_id : 0,
2723                                 in_reply_to->debug_id);
2724                         binder_inner_proc_unlock(target_thread->proc);
2725                         return_error = BR_FAILED_REPLY;
2726                         return_error_param = -EPROTO;
2727                         return_error_line = __LINE__;
2728                         in_reply_to = NULL;
2729                         target_thread = NULL;
2730                         goto err_dead_binder;
2731                 }
2732                 target_proc = target_thread->proc;
2733                 target_proc->tmp_ref++;
2734                 binder_inner_proc_unlock(target_thread->proc);
2735         } else {
2736                 if (tr->target.handle) {
2737                         struct binder_ref *ref;
2738
2739                         /*
2740                          * There must already be a strong ref
2741                          * on this node; take another strong
2742                          * increment on the node so that it
2743                          * stays alive until the transaction is
2744                          * done.
2745                          */
2746                         binder_proc_lock(proc);
2747                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2748                                                      true);
2749                         if (ref) {
2750                                 target_node = binder_get_node_refs_for_txn(
2751                                                 ref->node, &target_proc,
2752                                                 &return_error);
2753                         } else {
2754                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2755                                                   proc->pid, thread->pid);
2756                                 return_error = BR_FAILED_REPLY;
2757                         }
2758                         binder_proc_unlock(proc);
2759                 } else {
2760                         mutex_lock(&context->context_mgr_node_lock);
2761                         target_node = context->binder_context_mgr_node;
2762                         if (target_node)
2763                                 target_node = binder_get_node_refs_for_txn(
2764                                                 target_node, &target_proc,
2765                                                 &return_error);
2766                         else
2767                                 return_error = BR_DEAD_REPLY;
2768                         mutex_unlock(&context->context_mgr_node_lock);
2769                 }
2770                 if (!target_node) {
2771                         /*
2772                          * return_error is set above
2773                          */
2774                         return_error_param = -EINVAL;
2775                         return_error_line = __LINE__;
2776                         goto err_dead_binder;
2777                 }
2778                 e->to_node = target_node->debug_id;
2779                 if (security_binder_transaction(proc->tsk,
2780                                                 target_proc->tsk) < 0) {
2781                         return_error = BR_FAILED_REPLY;
2782                         return_error_param = -EPERM;
2783                         return_error_line = __LINE__;
2784                         goto err_invalid_target_handle;
2785                 }
2786                 binder_inner_proc_lock(proc);
2787                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2788                         struct binder_transaction *tmp;
2789
2790                         tmp = thread->transaction_stack;
2791                         if (tmp->to_thread != thread) {
2792                                 spin_lock(&tmp->lock);
2793                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2794                                         proc->pid, thread->pid, tmp->debug_id,
2795                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2796                                         tmp->to_thread ?
2797                                         tmp->to_thread->pid : 0);
2798                                 spin_unlock(&tmp->lock);
2799                                 binder_inner_proc_unlock(proc);
2800                                 return_error = BR_FAILED_REPLY;
2801                                 return_error_param = -EPROTO;
2802                                 return_error_line = __LINE__;
2803                                 goto err_bad_call_stack;
2804                         }
2805                         while (tmp) {
2806                                 struct binder_thread *from;
2807
2808                                 spin_lock(&tmp->lock);
2809                                 from = tmp->from;
2810                                 if (from && from->proc == target_proc) {
2811                                         atomic_inc(&from->tmp_ref);
2812                                         target_thread = from;
2813                                         spin_unlock(&tmp->lock);
2814                                         break;
2815                                 }
2816                                 spin_unlock(&tmp->lock);
2817                                 tmp = tmp->from_parent;
2818                         }
2819                 }
2820                 binder_inner_proc_unlock(proc);
2821         }
2822         if (target_thread)
2823                 e->to_thread = target_thread->pid;
2824         e->to_proc = target_proc->pid;
2825
2826         /* TODO: reuse incoming transaction for reply */
2827         t = kzalloc(sizeof(*t), GFP_KERNEL);
2828         if (t == NULL) {
2829                 return_error = BR_FAILED_REPLY;
2830                 return_error_param = -ENOMEM;
2831                 return_error_line = __LINE__;
2832                 goto err_alloc_t_failed;
2833         }
2834         binder_stats_created(BINDER_STAT_TRANSACTION);
2835         spin_lock_init(&t->lock);
2836
2837         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2838         if (tcomplete == NULL) {
2839                 return_error = BR_FAILED_REPLY;
2840                 return_error_param = -ENOMEM;
2841                 return_error_line = __LINE__;
2842                 goto err_alloc_tcomplete_failed;
2843         }
2844         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2845
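        /*
         * Two pieces of work exist from here on: @t is delivered to the
         * target, while @tcomplete is queued back to the sender as
         * BINDER_WORK_TRANSACTION_COMPLETE once delivery is set up.
         */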
2846         t->debug_id = t_debug_id;
2847
2848         if (reply)
2849                 binder_debug(BINDER_DEBUG_TRANSACTION,
2850                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2851                              proc->pid, thread->pid, t->debug_id,
2852                              target_proc->pid, target_thread->pid,
2853                              (u64)tr->data.ptr.buffer,
2854                              (u64)tr->data.ptr.offsets,
2855                              (u64)tr->data_size, (u64)tr->offsets_size,
2856                              (u64)extra_buffers_size);
2857         else
2858                 binder_debug(BINDER_DEBUG_TRANSACTION,
2859                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2860                              proc->pid, thread->pid, t->debug_id,
2861                              target_proc->pid, target_node->debug_id,
2862                              (u64)tr->data.ptr.buffer,
2863                              (u64)tr->data.ptr.offsets,
2864                              (u64)tr->data_size, (u64)tr->offsets_size,
2865                              (u64)extra_buffers_size);
2866
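        /*
         * Record the sending thread only for synchronous transactions;
         * replies and one-way calls have no sender to return to.
         */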
2867         if (!reply && !(tr->flags & TF_ONE_WAY))
2868                 t->from = thread;
2869         else
2870                 t->from = NULL;
2871         t->sender_euid = task_euid(proc->tsk);
2872         t->to_proc = target_proc;
2873         t->to_thread = target_thread;
2874         t->code = tr->code;
2875         t->flags = tr->flags;
2876         t->priority = task_nice(current);
2877
2878         trace_binder_transaction(reply, t, target_node);
2879
2880         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2881                 tr->offsets_size, extra_buffers_size,
2882                 !reply && (t->flags & TF_ONE_WAY));
2883         if (IS_ERR(t->buffer)) {
2884                 /*
2885                  * -ESRCH means the VMA was torn down: the target is dying.
2886                  */
2887                 return_error_param = PTR_ERR(t->buffer);
2888                 return_error = return_error_param == -ESRCH ?
2889                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2890                 return_error_line = __LINE__;
2891                 t->buffer = NULL;
2892                 goto err_binder_alloc_buf_failed;
2893         }
2894         t->buffer->allow_user_free = 0;
2895         t->buffer->debug_id = t->debug_id;
2896         t->buffer->transaction = t;
2897         t->buffer->target_node = target_node;
2898         trace_binder_transaction_alloc_buf(t->buffer);
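        /*
         * The target buffer is laid out as the data bytes, then the
         * offsets array (aligned to sizeof(void *)), then any extra
         * scatter-gather buffer space at the end.
         */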
2899         off_start = (binder_size_t *)(t->buffer->data +
2900                                       ALIGN(tr->data_size, sizeof(void *)));
2901         offp = off_start;
2902
2903         if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2904                            tr->data.ptr.buffer, tr->data_size)) {
2905                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2906                                 proc->pid, thread->pid);
2907                 return_error = BR_FAILED_REPLY;
2908                 return_error_param = -EFAULT;
2909                 return_error_line = __LINE__;
2910                 goto err_copy_data_failed;
2911         }
2912         if (copy_from_user(offp, (const void __user *)(uintptr_t)
2913                            tr->data.ptr.offsets, tr->offsets_size)) {
2914                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2915                                 proc->pid, thread->pid);
2916                 return_error = BR_FAILED_REPLY;
2917                 return_error_param = -EFAULT;
2918                 return_error_line = __LINE__;
2919                 goto err_copy_data_failed;
2920         }
2921         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2922                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2923                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2924                 return_error = BR_FAILED_REPLY;
2925                 return_error_param = -EINVAL;
2926                 return_error_line = __LINE__;
2927                 goto err_bad_offset;
2928         }
2929         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2930                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2931                                   proc->pid, thread->pid,
2932                                   (u64)extra_buffers_size);
2933                 return_error = BR_FAILED_REPLY;
2934                 return_error_param = -EINVAL;
2935                 return_error_line = __LINE__;
2936                 goto err_bad_offset;
2937         }
2938         off_end = (void *)off_start + tr->offsets_size;
2939         sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2940         sg_buf_end = sg_bufp + extra_buffers_size;
2941         off_min = 0;
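        /*
         * Walk the offsets array.  Each offset must name a valid object
         * and must lie at or past the end of the previous one (off_min),
         * so objects are in order and cannot overlap.
         */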
2942         for (; offp < off_end; offp++) {
2943                 struct binder_object_header *hdr;
2944                 size_t object_size = binder_validate_object(t->buffer, *offp);
2945
2946                 if (object_size == 0 || *offp < off_min) {
2947                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2948                                           proc->pid, thread->pid, (u64)*offp,
2949                                           (u64)off_min,
2950                                           (u64)t->buffer->data_size);
2951                         return_error = BR_FAILED_REPLY;
2952                         return_error_param = -EINVAL;
2953                         return_error_line = __LINE__;
2954                         goto err_bad_offset;
2955                 }
2956
2957                 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2958                 off_min = *offp + object_size;
2959                 switch (hdr->type) {
2960                 case BINDER_TYPE_BINDER:
2961                 case BINDER_TYPE_WEAK_BINDER: {
2962                         struct flat_binder_object *fp;
2963
2964                         fp = to_flat_binder_object(hdr);
2965                         ret = binder_translate_binder(fp, t, thread);
2966                         if (ret < 0) {
2967                                 return_error = BR_FAILED_REPLY;
2968                                 return_error_param = ret;
2969                                 return_error_line = __LINE__;
2970                                 goto err_translate_failed;
2971                         }
2972                 } break;
2973                 case BINDER_TYPE_HANDLE:
2974                 case BINDER_TYPE_WEAK_HANDLE: {
2975                         struct flat_binder_object *fp;
2976
2977                         fp = to_flat_binder_object(hdr);
2978                         ret = binder_translate_handle(fp, t, thread);
2979                         if (ret < 0) {
2980                                 return_error = BR_FAILED_REPLY;
2981                                 return_error_param = ret;
2982                                 return_error_line = __LINE__;
2983                                 goto err_translate_failed;
2984                         }
2985                 } break;
2986
2987                 case BINDER_TYPE_FD: {
2988                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2989                         int target_fd = binder_translate_fd(fp->fd, t, thread,
2990                                                             in_reply_to);
2991
2992                         if (target_fd < 0) {
2993                                 return_error = BR_FAILED_REPLY;
2994                                 return_error_param = target_fd;
2995                                 return_error_line = __LINE__;
2996                                 goto err_translate_failed;
2997                         }
2998                         fp->pad_binder = 0;
2999                         fp->fd = target_fd;
3000                 } break;
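                /*
                 * An fd array lives inside a parent scatter-gather
                 * buffer; the parent must already have appeared in this
                 * offsets array and fixups must be applied in order.
                 */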
3001                 case BINDER_TYPE_FDA: {
3002                         struct binder_fd_array_object *fda =
3003                                 to_binder_fd_array_object(hdr);
3004                         struct binder_buffer_object *parent =
3005                                 binder_validate_ptr(t->buffer, fda->parent,
3006                                                     off_start,
3007                                                     offp - off_start);
3008                         if (!parent) {
3009                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3010                                                   proc->pid, thread->pid);
3011                                 return_error = BR_FAILED_REPLY;
3012                                 return_error_param = -EINVAL;
3013                                 return_error_line = __LINE__;
3014                                 goto err_bad_parent;
3015                         }
3016                         if (!binder_validate_fixup(t->buffer, off_start,
3017                                                    parent, fda->parent_offset,
3018                                                    last_fixup_obj,
3019                                                    last_fixup_min_off)) {
3020                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3021                                                   proc->pid, thread->pid);
3022                                 return_error = BR_FAILED_REPLY;
3023                                 return_error_param = -EINVAL;
3024                                 return_error_line = __LINE__;
3025                                 goto err_bad_parent;
3026                         }
3027                         ret = binder_translate_fd_array(fda, parent, t, thread,
3028                                                         in_reply_to);
3029                         if (ret < 0) {
3030                                 return_error = BR_FAILED_REPLY;
3031                                 return_error_param = ret;
3032                                 return_error_line = __LINE__;
3033                                 goto err_translate_failed;
3034                         }
3035                         last_fixup_obj = parent;
3036                         last_fixup_min_off =
3037                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3038                 } break;
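                /*
                 * A BINDER_TYPE_PTR object describes a caller buffer that
                 * is copied into the scatter-gather area and rewritten to
                 * point into the target's address space.
                 */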
3039                 case BINDER_TYPE_PTR: {
3040                         struct binder_buffer_object *bp =
3041                                 to_binder_buffer_object(hdr);
3042                         size_t buf_left = sg_buf_end - sg_bufp;
3043
3044                         if (bp->length > buf_left) {
3045                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3046                                                   proc->pid, thread->pid);
3047                                 return_error = BR_FAILED_REPLY;
3048                                 return_error_param = -EINVAL;
3049                                 return_error_line = __LINE__;
3050                                 goto err_bad_offset;
3051                         }
3052                         if (copy_from_user(sg_bufp,
3053                                            (const void __user *)(uintptr_t)
3054                                            bp->buffer, bp->length)) {
3055                                 binder_user_error("%d:%d got transaction with invalid buffer ptr\n",
3056                                                   proc->pid, thread->pid);
3057                                 return_error = BR_FAILED_REPLY;
3058                                 return_error_param = -EFAULT;
3059                                 return_error_line = __LINE__;
3060                                 goto err_copy_data_failed;
3061                         }
3062                         /* Fix up buffer pointer to target proc address space */
3063                         bp->buffer = (uintptr_t)sg_bufp +
3064                                 binder_alloc_get_user_buffer_offset(
3065                                                 &target_proc->alloc);
3066                         sg_bufp += ALIGN(bp->length, sizeof(u64));
3067
3068                         ret = binder_fixup_parent(t, thread, bp, off_start,
3069                                                   offp - off_start,
3070                                                   last_fixup_obj,
3071                                                   last_fixup_min_off);
3072                         if (ret < 0) {
3073                                 return_error = BR_FAILED_REPLY;
3074                                 return_error_param = ret;
3075                                 return_error_line = __LINE__;
3076                                 goto err_translate_failed;
3077                         }
3078                         last_fixup_obj = bp;
3079                         last_fixup_min_off = 0;
3080                 } break;
3081                 default:
3082                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3083                                 proc->pid, thread->pid, hdr->type);
3084                         return_error = BR_FAILED_REPLY;
3085                         return_error_param = -EINVAL;
3086                         return_error_line = __LINE__;
3087                         goto err_bad_object_type;
3088                 }
3089         }
3090         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3091         binder_enqueue_work(proc, tcomplete, &thread->todo);
3092         t->work.type = BINDER_WORK_TRANSACTION;
3093
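        /*
         * Delivery: a reply goes directly to the waiting target thread;
         * a synchronous transaction is first pushed on this thread's
         * transaction_stack so the reply can find its way back; a
         * one-way transaction is simply queued on the target process.
         */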
3094         if (reply) {
3095                 binder_inner_proc_lock(target_proc);
3096                 if (target_thread->is_dead) {
3097                         binder_inner_proc_unlock(target_proc);
3098                         goto err_dead_proc_or_thread;
3099                 }
3100                 BUG_ON(t->buffer->async_transaction != 0);
3101                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3102                 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3103                 binder_inner_proc_unlock(target_proc);
3104                 wake_up_interruptible_sync(&target_thread->wait);
3105                 binder_free_transaction(in_reply_to);
3106         } else if (!(t->flags & TF_ONE_WAY)) {
3107                 BUG_ON(t->buffer->async_transaction != 0);
3108                 binder_inner_proc_lock(proc);
3109                 t->need_reply = 1;
3110                 t->from_parent = thread->transaction_stack;
3111                 thread->transaction_stack = t;
3112                 binder_inner_proc_unlock(proc);
3113                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3114                         binder_inner_proc_lock(proc);
3115                         binder_pop_transaction_ilocked(thread, t);
3116                         binder_inner_proc_unlock(proc);
3117                         goto err_dead_proc_or_thread;
3118                 }
3119         } else {
3120                 BUG_ON(target_node == NULL);
3121                 BUG_ON(t->buffer->async_transaction != 1);
3122                 if (!binder_proc_transaction(t, target_proc, NULL))
3123                         goto err_dead_proc_or_thread;
3124         }
3125         if (target_thread)
3126                 binder_thread_dec_tmpref(target_thread);
3127         binder_proc_dec_tmpref(target_proc);
3128         if (target_node)
3129                 binder_dec_node_tmpref(target_node);
3130         /*
3131          * write barrier to synchronize with initialization
3132          * of log entry
3133          */
3134         smp_wmb();
3135         WRITE_ONCE(e->debug_id_done, t_debug_id);
3136         return;
3137
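        /*
         * Error unwinding: the labels below undo the setup steps in
         * reverse order, ending with the temporary references taken on
         * the target thread, process and node.
         */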
3138 err_dead_proc_or_thread:
3139         return_error = BR_DEAD_REPLY;
3140         return_error_line = __LINE__;
3141         binder_dequeue_work(proc, tcomplete);
3142 err_translate_failed:
3143 err_bad_object_type:
3144 err_bad_offset:
3145 err_bad_parent:
3146 err_copy_data_failed:
3147         trace_binder_transaction_failed_buffer_release(t->buffer);
3148         binder_transaction_buffer_release(target_proc, t->buffer, offp);
3149         if (target_node)
3150                 binder_dec_node_tmpref(target_node);
3151         target_node = NULL;
3152         t->buffer->transaction = NULL;
3153         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3154 err_binder_alloc_buf_failed:
3155         kfree(tcomplete);
3156         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3157 err_alloc_tcomplete_failed:
3158         kfree(t);
3159         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3160 err_alloc_t_failed:
3161 err_bad_call_stack:
3162 err_empty_call_stack:
3163 err_dead_binder:
3164 err_invalid_target_handle:
3165         if (target_thread)
3166                 binder_thread_dec_tmpref(target_thread);
3167         if (target_proc)
3168                 binder_proc_dec_tmpref(target_proc);
3169         if (target_node) {
3170                 binder_dec_node(target_node, 1, 0);
3171                 binder_dec_node_tmpref(target_node);
3172         }
3173
3174         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3175                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3176                      proc->pid, thread->pid, return_error, return_error_param,
3177                      (u64)tr->data_size, (u64)tr->offsets_size,
3178                      return_error_line);
3179
3180         {
3181                 struct binder_transaction_log_entry *fe;
3182
3183                 e->return_error = return_error;
3184                 e->return_error_param = return_error_param;
3185                 e->return_error_line = return_error_line;
3186                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3187                 *fe = *e;
3188                 /*
3189                  * write barrier to synchronize with initialization
3190                  * of log entry
3191                  */
3192                 smp_wmb();
3193                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3194                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3195         }
3196
3197         BUG_ON(thread->return_error.cmd != BR_OK);
3198         if (in_reply_to) {
3199                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3200                 binder_enqueue_work(thread->proc,
3201                                     &thread->return_error.work,
3202                                     &thread->todo);
3203                 binder_send_failed_reply(in_reply_to, return_error);
3204         } else {
3205                 thread->return_error.cmd = return_error;
3206                 binder_enqueue_work(thread->proc,
3207                                     &thread->return_error.work,
3208                                     &thread->todo);
3209         }
3210 }
3211
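/*
 * binder_thread_write() - consume BC_* commands from the user write buffer
 *
 * Userspace drives binder by packing one or more BC_* commands into the
 * write buffer of a BINDER_WRITE_READ ioctl.  A minimal, illustrative
 * userspace sketch (error handling omitted; "fd" is assumed to be an
 * open binder device descriptor):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * Each command is handled below and *consumed is advanced after every
 * completed command, so a partially processed write can be resumed.
 */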
3212 static int binder_thread_write(struct binder_proc *proc,
3213                         struct binder_thread *thread,
3214                         binder_uintptr_t binder_buffer, size_t size,
3215                         binder_size_t *consumed)
3216 {
3217         uint32_t cmd;
3218         struct binder_context *context = proc->context;
3219         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3220         void __user *ptr = buffer + *consumed;
3221         void __user *end = buffer + size;
3222
3223         while (ptr < end && thread->return_error.cmd == BR_OK) {
3224                 int ret;
3225
3226                 if (get_user(cmd, (uint32_t __user *)ptr))
3227                         return -EFAULT;
3228                 ptr += sizeof(uint32_t);
3229                 trace_binder_command(cmd);
3230                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3231                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3232                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3233                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3234                 }
3235                 switch (cmd) {
3236                 case BC_INCREFS:
3237                 case BC_ACQUIRE:
3238                 case BC_RELEASE:
3239                 case BC_DECREFS: {
3240                         uint32_t target;
3241                         const char *debug_string;
3242                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3243                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3244                         struct binder_ref_data rdata;
3245
3246                         if (get_user(target, (uint32_t __user *)ptr))
3247                                 return -EFAULT;
3248
3249                         ptr += sizeof(uint32_t);
3250                         ret = -1;
3251                         if (increment && !target) {
3252                                 struct binder_node *ctx_mgr_node;
3253                                 mutex_lock(&context->context_mgr_node_lock);
3254                                 ctx_mgr_node = context->binder_context_mgr_node;
3255                                 if (ctx_mgr_node)
3256                                         ret = binder_inc_ref_for_node(
3257                                                         proc, ctx_mgr_node,
3258                                                         strong, NULL, &rdata);
3259                                 mutex_unlock(&context->context_mgr_node_lock);
3260                         }
3261                         if (ret)
3262                                 ret = binder_update_ref_for_handle(
3263                                                 proc, target, increment, strong,
3264                                                 &rdata);
3265                         if (!ret && rdata.desc != target) {
3266                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3267                                         proc->pid, thread->pid,
3268                                         target, rdata.desc);
3269                         }
3270                         switch (cmd) {
3271                         case BC_INCREFS:
3272                                 debug_string = "IncRefs";
3273                                 break;
3274                         case BC_ACQUIRE:
3275                                 debug_string = "Acquire";
3276                                 break;
3277                         case BC_RELEASE:
3278                                 debug_string = "Release";
3279                                 break;
3280                         case BC_DECREFS:
3281                         default:
3282                                 debug_string = "DecRefs";
3283                                 break;
3284                         }
3285                         if (ret) {
3286                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3287                                         proc->pid, thread->pid, debug_string,
3288                                         strong, target, ret);
3289                                 break;
3290                         }
3291                         binder_debug(BINDER_DEBUG_USER_REFS,
3292                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3293                                      proc->pid, thread->pid, debug_string,
3294                                      rdata.debug_id, rdata.desc, rdata.strong,
3295                                      rdata.weak);
3296                         break;
3297                 }
3298                 case BC_INCREFS_DONE:
3299                 case BC_ACQUIRE_DONE: {
3300                         binder_uintptr_t node_ptr;
3301                         binder_uintptr_t cookie;
3302                         struct binder_node *node;
3303                         bool free_node;
3304
3305                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3306                                 return -EFAULT;
3307                         ptr += sizeof(binder_uintptr_t);
3308                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3309                                 return -EFAULT;
3310                         ptr += sizeof(binder_uintptr_t);
3311                         node = binder_get_node(proc, node_ptr);
3312                         if (node == NULL) {
3313                                 binder_user_error("%d:%d %s u%016llx no match\n",
3314                                         proc->pid, thread->pid,
3315                                         cmd == BC_INCREFS_DONE ?
3316                                         "BC_INCREFS_DONE" :
3317                                         "BC_ACQUIRE_DONE",
3318                                         (u64)node_ptr);
3319                                 break;
3320                         }
3321                         if (cookie != node->cookie) {
3322                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3323                                         proc->pid, thread->pid,
3324                                         cmd == BC_INCREFS_DONE ?
3325                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3326                                         (u64)node_ptr, node->debug_id,
3327                                         (u64)cookie, (u64)node->cookie);
3328                                 binder_put_node(node);
3329                                 break;
3330                         }
3331                         binder_node_inner_lock(node);
3332                         if (cmd == BC_ACQUIRE_DONE) {
3333                                 if (node->pending_strong_ref == 0) {
3334                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3335                                                 proc->pid, thread->pid,
3336                                                 node->debug_id);
3337                                         binder_node_inner_unlock(node);
3338                                         binder_put_node(node);
3339                                         break;
3340                                 }
3341                                 node->pending_strong_ref = 0;
3342                         } else {
3343                                 if (node->pending_weak_ref == 0) {
3344                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3345                                                 proc->pid, thread->pid,
3346                                                 node->debug_id);
3347                                         binder_node_inner_unlock(node);
3348                                         binder_put_node(node);
3349                                         break;
3350                                 }
3351                                 node->pending_weak_ref = 0;
3352                         }
3353                         free_node = binder_dec_node_nilocked(node,
3354                                         cmd == BC_ACQUIRE_DONE, 0);
3355                         WARN_ON(free_node);
3356                         binder_debug(BINDER_DEBUG_USER_REFS,
3357                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3358                                      proc->pid, thread->pid,
3359                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3360                                      node->debug_id, node->local_strong_refs,
3361                                      node->local_weak_refs, node->tmp_refs);
3362                         binder_node_inner_unlock(node);
3363                         binder_put_node(node);
3364                         break;
3365                 }
3366                 case BC_ATTEMPT_ACQUIRE:
3367                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3368                         return -EINVAL;
3369                 case BC_ACQUIRE_RESULT:
3370                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3371                         return -EINVAL;
3372
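                /*
                 * BC_FREE_BUFFER returns a transaction buffer to the
                 * kernel.  Freeing an async buffer also releases the next
                 * queued async transaction on the same node, which is how
                 * one-way calls to a node are serialized.
                 */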
3373                 case BC_FREE_BUFFER: {
3374                         binder_uintptr_t data_ptr;
3375                         struct binder_buffer *buffer;
3376
3377                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3378                                 return -EFAULT;
3379                         ptr += sizeof(binder_uintptr_t);
3380
3381                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3382                                                               data_ptr);
3383                         if (buffer == NULL) {
3384                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3385                                         proc->pid, thread->pid, (u64)data_ptr);
3386                                 break;
3387                         }
3388                         if (!buffer->allow_user_free) {
3389                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3390                                         proc->pid, thread->pid, (u64)data_ptr);
3391                                 break;
3392                         }
3393                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3394                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3395                                      proc->pid, thread->pid, (u64)data_ptr,
3396                                      buffer->debug_id,
3397                                      buffer->transaction ? "active" : "finished");
3398
3399                         if (buffer->transaction) {
3400                                 buffer->transaction->buffer = NULL;
3401                                 buffer->transaction = NULL;
3402                         }
3403                         if (buffer->async_transaction && buffer->target_node) {
3404                                 struct binder_node *buf_node;
3405                                 struct binder_work *w;
3406
3407                                 buf_node = buffer->target_node;
3408                                 binder_node_inner_lock(buf_node);
3409                                 BUG_ON(!buf_node->has_async_transaction);
3410                                 BUG_ON(buf_node->proc != proc);
3411                                 w = binder_dequeue_work_head_ilocked(
3412                                                 &buf_node->async_todo);
3413                                 if (!w) {
3414                                         buf_node->has_async_transaction = 0;
3415                                 } else {
3416                                         binder_enqueue_work_ilocked(
3417                                                         w, &proc->todo);
3418                                         binder_wakeup_proc_ilocked(proc);
3419                                 }
3420                                 binder_node_inner_unlock(buf_node);
3421                         }
3422                         trace_binder_transaction_buffer_release(buffer);
3423                         binder_transaction_buffer_release(proc, buffer, NULL);
3424                         binder_alloc_free_buf(&proc->alloc, buffer);
3425                         break;
3426                 }
3427
3428                 case BC_TRANSACTION_SG:
3429                 case BC_REPLY_SG: {
3430                         struct binder_transaction_data_sg tr;
3431
3432                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3433                                 return -EFAULT;
3434                         ptr += sizeof(tr);
3435                         binder_transaction(proc, thread, &tr.transaction_data,
3436                                            cmd == BC_REPLY_SG, tr.buffers_size);
3437                         break;
3438                 }
3439                 case BC_TRANSACTION:
3440                 case BC_REPLY: {
3441                         struct binder_transaction_data tr;
3442
3443                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3444                                 return -EFAULT;
3445                         ptr += sizeof(tr);
3446                         binder_transaction(proc, thread, &tr,
3447                                            cmd == BC_REPLY, 0);
3448                         break;
3449                 }
3450
3451                 case BC_REGISTER_LOOPER:
3452                         binder_debug(BINDER_DEBUG_THREADS,
3453                                      "%d:%d BC_REGISTER_LOOPER\n",
3454                                      proc->pid, thread->pid);
3455                         binder_inner_proc_lock(proc);
3456                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3457                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3458                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3459                                         proc->pid, thread->pid);
3460                         } else if (proc->requested_threads == 0) {
3461                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3462                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3463                                         proc->pid, thread->pid);
3464                         } else {
3465                                 proc->requested_threads--;
3466                                 proc->requested_threads_started++;
3467                         }
3468                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3469                         binder_inner_proc_unlock(proc);
3470                         break;
3471                 case BC_ENTER_LOOPER:
3472                         binder_debug(BINDER_DEBUG_THREADS,
3473                                      "%d:%d BC_ENTER_LOOPER\n",
3474                                      proc->pid, thread->pid);
3475                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3476                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3477                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3478                                         proc->pid, thread->pid);
3479                         }
3480                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3481                         break;
3482                 case BC_EXIT_LOOPER:
3483                         binder_debug(BINDER_DEBUG_THREADS,
3484                                      "%d:%d BC_EXIT_LOOPER\n",
3485                                      proc->pid, thread->pid);
3486                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3487                         break;
3488
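                /*
                 * BC_REQUEST_DEATH_NOTIFICATION arms a cookie on a ref so
                 * that BR_DEAD_BINDER is delivered when the ref's node
                 * dies (immediately, if it is already dead);
                 * BC_CLEAR_DEATH_NOTIFICATION disarms it again.
                 */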
3489                 case BC_REQUEST_DEATH_NOTIFICATION:
3490                 case BC_CLEAR_DEATH_NOTIFICATION: {
3491                         uint32_t target;
3492                         binder_uintptr_t cookie;
3493                         struct binder_ref *ref;
3494                         struct binder_ref_death *death = NULL;
3495
3496                         if (get_user(target, (uint32_t __user *)ptr))
3497                                 return -EFAULT;
3498                         ptr += sizeof(uint32_t);
3499                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3500                                 return -EFAULT;
3501                         ptr += sizeof(binder_uintptr_t);
3502                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3503                                 /*
3504                                  * Allocate memory for death notification
3505                                  * before taking lock
3506                                  */
3507                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3508                                 if (death == NULL) {
3509                                         WARN_ON(thread->return_error.cmd !=
3510                                                 BR_OK);
3511                                         thread->return_error.cmd = BR_ERROR;
3512                                         binder_enqueue_work(
3513                                                 thread->proc,
3514                                                 &thread->return_error.work,
3515                                                 &thread->todo);
3516                                         binder_debug(
3517                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3518                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3519                                                 proc->pid, thread->pid);
3520                                         break;
3521                                 }
3522                         }
3523                         binder_proc_lock(proc);
3524                         ref = binder_get_ref_olocked(proc, target, false);
3525                         if (ref == NULL) {
3526                                 binder_user_error("%d:%d %s invalid ref %d\n",
3527                                         proc->pid, thread->pid,
3528                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3529                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3530                                         "BC_CLEAR_DEATH_NOTIFICATION",
3531                                         target);
3532                                 binder_proc_unlock(proc);
3533                                 kfree(death);
3534                                 break;
3535                         }
3536
3537                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3538                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3539                                      proc->pid, thread->pid,
3540                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3541                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3542                                      "BC_CLEAR_DEATH_NOTIFICATION",
3543                                      (u64)cookie, ref->data.debug_id,
3544                                      ref->data.desc, ref->data.strong,
3545                                      ref->data.weak, ref->node->debug_id);
3546
3547                         binder_node_lock(ref->node);
3548                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3549                                 if (ref->death) {
3550                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3551                                                 proc->pid, thread->pid);
3552                                         binder_node_unlock(ref->node);
3553                                         binder_proc_unlock(proc);
3554                                         kfree(death);
3555                                         break;
3556                                 }
3557                                 binder_stats_created(BINDER_STAT_DEATH);
3558                                 INIT_LIST_HEAD(&death->work.entry);
3559                                 death->cookie = cookie;
3560                                 ref->death = death;
3561                                 if (ref->node->proc == NULL) {
3562                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3563
3564                                         binder_inner_proc_lock(proc);
3565                                         binder_enqueue_work_ilocked(
3566                                                 &ref->death->work, &proc->todo);
3567                                         binder_wakeup_proc_ilocked(proc);
3568                                         binder_inner_proc_unlock(proc);
3569                                 }
3570                         } else {
3571                                 if (ref->death == NULL) {
3572                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3573                                                 proc->pid, thread->pid);
3574                                         binder_node_unlock(ref->node);
3575                                         binder_proc_unlock(proc);
3576                                         break;
3577                                 }
3578                                 death = ref->death;
3579                                 if (death->cookie != cookie) {
3580                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3581                                                 proc->pid, thread->pid,
3582                                                 (u64)death->cookie,
3583                                                 (u64)cookie);
3584                                         binder_node_unlock(ref->node);
3585                                         binder_proc_unlock(proc);
3586                                         break;
3587                                 }
3588                                 ref->death = NULL;
3589                                 binder_inner_proc_lock(proc);
3590                                 if (list_empty(&death->work.entry)) {
3591                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3592                                         if (thread->looper &
3593                                             (BINDER_LOOPER_STATE_REGISTERED |
3594                                              BINDER_LOOPER_STATE_ENTERED))
3595                                                 binder_enqueue_work_ilocked(
3596                                                                 &death->work,
3597                                                                 &thread->todo);
3598                                         else {
3599                                                 binder_enqueue_work_ilocked(
3600                                                                 &death->work,
3601                                                                 &proc->todo);
3602                                                 binder_wakeup_proc_ilocked(
3603                                                                 proc);
3604                                         }
3605                                 } else {
3606                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3607                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3608                                 }
3609                                 binder_inner_proc_unlock(proc);
3610                         }
3611                         binder_node_unlock(ref->node);
3612                         binder_proc_unlock(proc);
3613                 } break;
3614                 case BC_DEAD_BINDER_DONE: {
3615                         struct binder_work *w;
3616                         binder_uintptr_t cookie;
3617                         struct binder_ref_death *death = NULL;
3618
3619                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3620                                 return -EFAULT;
3621
3622                         ptr += sizeof(cookie);
3623                         binder_inner_proc_lock(proc);
3624                         list_for_each_entry(w, &proc->delivered_death,
3625                                             entry) {
3626                                 struct binder_ref_death *tmp_death =
3627                                         container_of(w,
3628                                                      struct binder_ref_death,
3629                                                      work);
3630
3631                                 if (tmp_death->cookie == cookie) {
3632                                         death = tmp_death;
3633                                         break;
3634                                 }
3635                         }
3636                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3637                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3638                                      proc->pid, thread->pid, (u64)cookie,
3639                                      death);
3640                         if (death == NULL) {
3641                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3642                                         proc->pid, thread->pid, (u64)cookie);
3643                                 binder_inner_proc_unlock(proc);
3644                                 break;
3645                         }
3646                         binder_dequeue_work_ilocked(&death->work);
3647                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3648                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3649                                 if (thread->looper &
3650                                         (BINDER_LOOPER_STATE_REGISTERED |
3651                                          BINDER_LOOPER_STATE_ENTERED))
3652                                         binder_enqueue_work_ilocked(
3653                                                 &death->work, &thread->todo);
3654                                 else {
3655                                         binder_enqueue_work_ilocked(
3656                                                         &death->work,
3657                                                         &proc->todo);
3658                                         binder_wakeup_proc_ilocked(proc);
3659                                 }
3660                         }
3661                         binder_inner_proc_unlock(proc);
3662                 } break;
3663
3664                 default:
3665                         pr_err("%d:%d unknown command %d\n",
3666                                proc->pid, thread->pid, cmd);
3667                         return -EINVAL;
3668                 }
3669                 *consumed = ptr - buffer;
3670         }
3671         return 0;
3672 }
3673
3674 static void binder_stat_br(struct binder_proc *proc,
3675                            struct binder_thread *thread, uint32_t cmd)
3676 {
3677         trace_binder_return(cmd);
3678         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3679                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3680                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3681                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3682         }
3683 }
3684
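/*
 * Write a single node command (cmd, node pointer, cookie) into the user
 * read buffer and advance *ptrp past it.
 */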
3685 static int binder_put_node_cmd(struct binder_proc *proc,
3686                                struct binder_thread *thread,
3687                                void __user **ptrp,
3688                                binder_uintptr_t node_ptr,
3689                                binder_uintptr_t node_cookie,
3690                                int node_debug_id,
3691                                uint32_t cmd, const char *cmd_name)
3692 {
3693         void __user *ptr = *ptrp;
3694
3695         if (put_user(cmd, (uint32_t __user *)ptr))
3696                 return -EFAULT;
3697         ptr += sizeof(uint32_t);
3698
3699         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3700                 return -EFAULT;
3701         ptr += sizeof(binder_uintptr_t);
3702
3703         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3704                 return -EFAULT;
3705         ptr += sizeof(binder_uintptr_t);
3706
3707         binder_stat_br(proc, thread, cmd);
3708         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3709                      proc->pid, thread->pid, cmd_name, node_debug_id,
3710                      (u64)node_ptr, (u64)node_cookie);
3711
3712         *ptrp = ptr;
3713         return 0;
3714 }
3715
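/*
 * Sleep until this thread has work (or, with @do_proc_work, until the
 * process has work), parking on proc->waiting_threads so an incoming
 * transaction can pick an idle thread to wake.  The sleep is not counted
 * by the freezer.
 */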
3716 static int binder_wait_for_work(struct binder_thread *thread,
3717                                 bool do_proc_work)
3718 {
3719         DEFINE_WAIT(wait);
3720         struct binder_proc *proc = thread->proc;
3721         int ret = 0;
3722
3723         freezer_do_not_count();
3724         binder_inner_proc_lock(proc);
3725         for (;;) {
3726                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3727                 if (binder_has_work_ilocked(thread, do_proc_work))
3728                         break;
3729                 if (do_proc_work)
3730                         list_add(&thread->waiting_thread_node,
3731                                  &proc->waiting_threads);
3732                 binder_inner_proc_unlock(proc);
3733                 schedule();
3734                 binder_inner_proc_lock(proc);
3735                 list_del_init(&thread->waiting_thread_node);
3736                 if (signal_pending(current)) {
3737                         ret = -ERESTARTSYS;
3738                         break;
3739                 }
3740         }
3741         finish_wait(&thread->wait, &wait);
3742         binder_inner_proc_unlock(proc);
3743         freezer_count();
3744
3745         return ret;
3746 }
3747
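/**
 * binder_thread_read() - fill a user-space buffer with BR_* work items
 * @proc:	process doing the read
 * @thread:	thread doing the read
 * @binder_buffer: user-space address of the read buffer
 * @size:	size of the read buffer
 * @consumed:	in/out count of bytes already consumed in the buffer
 * @non_block:	true if the read must not block
 *
 * Writes a leading BR_NOOP into a fresh buffer, then drains work from
 * thread->todo (and from proc->todo when the thread is available for
 * process work) into the buffer as BR_* commands, sleeping in
 * binder_wait_for_work() when nothing is pending and @non_block is
 * false. May append BR_SPAWN_LOOPER before returning to ask user
 * space to start another looper thread.
 *
 * Return: 0 on success, a negative errno on failure.
 */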
3748 static int binder_thread_read(struct binder_proc *proc,
3749                               struct binder_thread *thread,
3750                               binder_uintptr_t binder_buffer, size_t size,
3751                               binder_size_t *consumed, int non_block)
3752 {
3753         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3754         void __user *ptr = buffer + *consumed;
3755         void __user *end = buffer + size;
3756
3757         int ret = 0;
3758         int wait_for_proc_work;
3759
3760         if (*consumed == 0) {
3761                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3762                         return -EFAULT;
3763                 ptr += sizeof(uint32_t);
3764         }
3765
3766 retry:
3767         binder_inner_proc_lock(proc);
3768         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3769         binder_inner_proc_unlock(proc);
3770
3771         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3772
3773         trace_binder_wait_for_work(wait_for_proc_work,
3774                                    !!thread->transaction_stack,
3775                                    !binder_worklist_empty(proc, &thread->todo));
3776         if (wait_for_proc_work) {
3777                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3778                                         BINDER_LOOPER_STATE_ENTERED))) {
3779                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3780                                 proc->pid, thread->pid, thread->looper);
3781                         wait_event_interruptible(binder_user_error_wait,
3782                                                  binder_stop_on_user_error < 2);
3783                 }
3784                 binder_set_nice(proc->default_priority);
3785         }
3786
3787         if (non_block) {
3788                 if (!binder_has_work(thread, wait_for_proc_work))
3789                         ret = -EAGAIN;
3790         } else {
3791                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3792         }
3793
3794         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3795
3796         if (ret)
3797                 return ret;
3798
3799         while (1) {
3800                 uint32_t cmd;
3801                 struct binder_transaction_data tr;
3802                 struct binder_work *w = NULL;
3803                 struct list_head *list = NULL;
3804                 struct binder_transaction *t = NULL;
3805                 struct binder_thread *t_from;
3806
3807                 binder_inner_proc_lock(proc);
3808                 if (!binder_worklist_empty_ilocked(&thread->todo))
3809                         list = &thread->todo;
3810                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3811                            wait_for_proc_work)
3812                         list = &proc->todo;
3813                 else {
3814                         binder_inner_proc_unlock(proc);
3815
3816                         /* no data added */
3817                         if (ptr - buffer == 4 && !thread->looper_need_return)
3818                                 goto retry;
3819                         break;
3820                 }
3821
3822                 if (end - ptr < sizeof(tr) + 4) {
3823                         binder_inner_proc_unlock(proc);
3824                         break;
3825                 }
3826                 w = binder_dequeue_work_head_ilocked(list);
3827
3828                 switch (w->type) {
3829                 case BINDER_WORK_TRANSACTION: {
3830                         binder_inner_proc_unlock(proc);
3831                         t = container_of(w, struct binder_transaction, work);
3832                 } break;
3833                 case BINDER_WORK_RETURN_ERROR: {
3834                         struct binder_error *e = container_of(
3835                                         w, struct binder_error, work);
3836
3837                         WARN_ON(e->cmd == BR_OK);
3838                         binder_inner_proc_unlock(proc);
                        if (put_user(e->cmd, (uint32_t __user *)ptr))
                                return -EFAULT;
                        cmd = e->cmd;
                        e->cmd = BR_OK;
                        ptr += sizeof(uint32_t);

                        binder_stat_br(proc, thread, cmd);
3845                 } break;
3846                 case BINDER_WORK_TRANSACTION_COMPLETE: {
3847                         binder_inner_proc_unlock(proc);
3848                         cmd = BR_TRANSACTION_COMPLETE;
3849                         if (put_user(cmd, (uint32_t __user *)ptr))
3850                                 return -EFAULT;
3851                         ptr += sizeof(uint32_t);
3852
3853                         binder_stat_br(proc, thread, cmd);
3854                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3855                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3856                                      proc->pid, thread->pid);
3857                         kfree(w);
3858                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3859                 } break;
3860                 case BINDER_WORK_NODE: {
3861                         struct binder_node *node = container_of(w, struct binder_node, work);
3862                         int strong, weak;
3863                         binder_uintptr_t node_ptr = node->ptr;
3864                         binder_uintptr_t node_cookie = node->cookie;
3865                         int node_debug_id = node->debug_id;
3866                         int has_weak_ref;
3867                         int has_strong_ref;
3868                         void __user *orig_ptr = ptr;
3869
3870                         BUG_ON(proc != node->proc);
3871                         strong = node->internal_strong_refs ||
3872                                         node->local_strong_refs;
3873                         weak = !hlist_empty(&node->refs) ||
3874                                         node->local_weak_refs ||
3875                                         node->tmp_refs || strong;
3876                         has_strong_ref = node->has_strong_ref;
3877                         has_weak_ref = node->has_weak_ref;
3878
3879                         if (weak && !has_weak_ref) {
3880                                 node->has_weak_ref = 1;
3881                                 node->pending_weak_ref = 1;
3882                                 node->local_weak_refs++;
3883                         }
3884                         if (strong && !has_strong_ref) {
3885                                 node->has_strong_ref = 1;
3886                                 node->pending_strong_ref = 1;
3887                                 node->local_strong_refs++;
3888                         }
3889                         if (!strong && has_strong_ref)
3890                                 node->has_strong_ref = 0;
3891                         if (!weak && has_weak_ref)
3892                                 node->has_weak_ref = 0;
3893                         if (!weak && !strong) {
3894                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3895                                              "%d:%d node %d u%016llx c%016llx deleted\n",
3896                                              proc->pid, thread->pid,
3897                                              node_debug_id,
3898                                              (u64)node_ptr,
3899                                              (u64)node_cookie);
3900                                 rb_erase(&node->rb_node, &proc->nodes);
3901                                 binder_inner_proc_unlock(proc);
3902                                 binder_node_lock(node);
3903                                 /*
3904                                  * Acquire the node lock before freeing the
3905                                  * node to serialize with other threads that
3906                                  * may have been holding the node lock while
3907                                  * decrementing this node (avoids race where
3908                                  * this thread frees while the other thread
3909                                  * is unlocking the node after the final
3910                                  * decrement)
3911                                  */
3912                                 binder_node_unlock(node);
3913                                 binder_free_node(node);
3914                         } else
3915                                 binder_inner_proc_unlock(proc);
3916
3917                         if (weak && !has_weak_ref)
3918                                 ret = binder_put_node_cmd(
3919                                                 proc, thread, &ptr, node_ptr,
3920                                                 node_cookie, node_debug_id,
3921                                                 BR_INCREFS, "BR_INCREFS");
3922                         if (!ret && strong && !has_strong_ref)
3923                                 ret = binder_put_node_cmd(
3924                                                 proc, thread, &ptr, node_ptr,
3925                                                 node_cookie, node_debug_id,
3926                                                 BR_ACQUIRE, "BR_ACQUIRE");
3927                         if (!ret && !strong && has_strong_ref)
3928                                 ret = binder_put_node_cmd(
3929                                                 proc, thread, &ptr, node_ptr,
3930                                                 node_cookie, node_debug_id,
3931                                                 BR_RELEASE, "BR_RELEASE");
3932                         if (!ret && !weak && has_weak_ref)
3933                                 ret = binder_put_node_cmd(
3934                                                 proc, thread, &ptr, node_ptr,
3935                                                 node_cookie, node_debug_id,
3936                                                 BR_DECREFS, "BR_DECREFS");
3937                         if (orig_ptr == ptr)
3938                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3939                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
3940                                              proc->pid, thread->pid,
3941                                              node_debug_id,
3942                                              (u64)node_ptr,
3943                                              (u64)node_cookie);
3944                         if (ret)
3945                                 return ret;
3946                 } break;
3947                 case BINDER_WORK_DEAD_BINDER:
3948                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3949                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3950                         struct binder_ref_death *death;
3951                         uint32_t cmd;
3952                         binder_uintptr_t cookie;
3953
3954                         death = container_of(w, struct binder_ref_death, work);
3955                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3956                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3957                         else
3958                                 cmd = BR_DEAD_BINDER;
3959                         cookie = death->cookie;
3960
3961                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3962                                      "%d:%d %s %016llx\n",
3963                                       proc->pid, thread->pid,
3964                                       cmd == BR_DEAD_BINDER ?
3965                                       "BR_DEAD_BINDER" :
3966                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3967                                       (u64)cookie);
3968                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3969                                 binder_inner_proc_unlock(proc);
3970                                 kfree(death);
3971                                 binder_stats_deleted(BINDER_STAT_DEATH);
3972                         } else {
3973                                 binder_enqueue_work_ilocked(
3974                                                 w, &proc->delivered_death);
3975                                 binder_inner_proc_unlock(proc);
3976                         }
3977                         if (put_user(cmd, (uint32_t __user *)ptr))
3978                                 return -EFAULT;
3979                         ptr += sizeof(uint32_t);
3980                         if (put_user(cookie,
3981                                      (binder_uintptr_t __user *)ptr))
3982                                 return -EFAULT;
3983                         ptr += sizeof(binder_uintptr_t);
3984                         binder_stat_br(proc, thread, cmd);
3985                         if (cmd == BR_DEAD_BINDER)
3986                                 goto done; /* DEAD_BINDER notifications can cause transactions */
3987                 } break;
3988                 }
3989
3990                 if (!t)
3991                         continue;
3992
3993                 BUG_ON(t->buffer == NULL);
3994                 if (t->buffer->target_node) {
3995                         struct binder_node *target_node = t->buffer->target_node;
3996
3997                         tr.target.ptr = target_node->ptr;
                        tr.cookie = target_node->cookie;
3999                         t->saved_priority = task_nice(current);
4000                         if (t->priority < target_node->min_priority &&
4001                             !(t->flags & TF_ONE_WAY))
4002                                 binder_set_nice(t->priority);
4003                         else if (!(t->flags & TF_ONE_WAY) ||
4004                                  t->saved_priority > target_node->min_priority)
4005                                 binder_set_nice(target_node->min_priority);
4006                         cmd = BR_TRANSACTION;
4007                 } else {
4008                         tr.target.ptr = 0;
4009                         tr.cookie = 0;
4010                         cmd = BR_REPLY;
4011                 }
4012                 tr.code = t->code;
4013                 tr.flags = t->flags;
4014                 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4015
4016                 t_from = binder_get_txn_from(t);
4017                 if (t_from) {
4018                         struct task_struct *sender = t_from->proc->tsk;
4019
4020                         tr.sender_pid = task_tgid_nr_ns(sender,
4021                                                         task_active_pid_ns(current));
4022                 } else {
4023                         tr.sender_pid = 0;
4024                 }
4025
4026                 tr.data_size = t->buffer->data_size;
4027                 tr.offsets_size = t->buffer->offsets_size;
4028                 tr.data.ptr.buffer = (binder_uintptr_t)
4029                         ((uintptr_t)t->buffer->data +
4030                         binder_alloc_get_user_buffer_offset(&proc->alloc));
4031                 tr.data.ptr.offsets = tr.data.ptr.buffer +
4032                                         ALIGN(t->buffer->data_size,
4033                                             sizeof(void *));
4034
4035                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4036                         if (t_from)
4037                                 binder_thread_dec_tmpref(t_from);
4038
4039                         binder_cleanup_transaction(t, "put_user failed",
4040                                                    BR_FAILED_REPLY);
4041
4042                         return -EFAULT;
4043                 }
4044                 ptr += sizeof(uint32_t);
4045                 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4046                         if (t_from)
4047                                 binder_thread_dec_tmpref(t_from);
4048
4049                         binder_cleanup_transaction(t, "copy_to_user failed",
4050                                                    BR_FAILED_REPLY);
4051
4052                         return -EFAULT;
4053                 }
4054                 ptr += sizeof(tr);
4055
4056                 trace_binder_transaction_received(t);
4057                 binder_stat_br(proc, thread, cmd);
4058                 binder_debug(BINDER_DEBUG_TRANSACTION,
4059                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4060                              proc->pid, thread->pid,
4061                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4062                              "BR_REPLY",
4063                              t->debug_id, t_from ? t_from->proc->pid : 0,
4064                              t_from ? t_from->pid : 0, cmd,
4065                              t->buffer->data_size, t->buffer->offsets_size,
4066                              (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4067
4068                 if (t_from)
4069                         binder_thread_dec_tmpref(t_from);
4070                 t->buffer->allow_user_free = 1;
4071                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4072                         binder_inner_proc_lock(thread->proc);
4073                         t->to_parent = thread->transaction_stack;
4074                         t->to_thread = thread;
4075                         thread->transaction_stack = t;
4076                         binder_inner_proc_unlock(thread->proc);
4077                 } else {
4078                         binder_free_transaction(t);
4079                 }
4080                 break;
4081         }
4082
4083 done:
4084
4085         *consumed = ptr - buffer;
4086         binder_inner_proc_lock(proc);
4087         if (proc->requested_threads == 0 &&
4088             list_empty(&thread->proc->waiting_threads) &&
4089             proc->requested_threads_started < proc->max_threads &&
4090             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
             BINDER_LOOPER_STATE_ENTERED))
            /* the user-space code fails to spawn a new thread if we leave this out */) {
4093                 proc->requested_threads++;
4094                 binder_inner_proc_unlock(proc);
4095                 binder_debug(BINDER_DEBUG_THREADS,
4096                              "%d:%d BR_SPAWN_LOOPER\n",
4097                              proc->pid, thread->pid);
4098                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4099                         return -EFAULT;
4100                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4101         } else
4102                 binder_inner_proc_unlock(proc);
4103         return 0;
4104 }
4105
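/**
 * binder_release_work() - drain and discard undelivered work items
 * @proc:	process that owns @list
 * @list:	work list to drain
 *
 * Dequeues every remaining item on @list: transactions are failed
 * with BR_DEAD_REPLY, TRANSACTION_COMPLETE and death-notification
 * records are freed, and undelivered error returns are only logged.
 * Used when a thread or process goes away with work still queued.
 */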
4106 static void binder_release_work(struct binder_proc *proc,
4107                                 struct list_head *list)
4108 {
4109         struct binder_work *w;
4110
4111         while (1) {
4112                 w = binder_dequeue_work_head(proc, list);
4113                 if (!w)
4114                         return;
4115
4116                 switch (w->type) {
4117                 case BINDER_WORK_TRANSACTION: {
4118                         struct binder_transaction *t;
4119
4120                         t = container_of(w, struct binder_transaction, work);
4121
4122                         binder_cleanup_transaction(t, "process died.",
4123                                                    BR_DEAD_REPLY);
4124                 } break;
4125                 case BINDER_WORK_RETURN_ERROR: {
4126                         struct binder_error *e = container_of(
4127                                         w, struct binder_error, work);
4128
4129                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4130                                 "undelivered TRANSACTION_ERROR: %u\n",
4131                                 e->cmd);
4132                 } break;
4133                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4134                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4135                                 "undelivered TRANSACTION_COMPLETE\n");
4136                         kfree(w);
4137                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4138                 } break;
4139                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4140                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4141                         struct binder_ref_death *death;
4142
4143                         death = container_of(w, struct binder_ref_death, work);
4144                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4145                                 "undelivered death notification, %016llx\n",
4146                                 (u64)death->cookie);
4147                         kfree(death);
4148                         binder_stats_deleted(BINDER_STAT_DEATH);
4149                 } break;
4150                 default:
4151                         pr_err("unexpected work type, %d, not freed\n",
4152                                w->type);
4153                         break;
4154                 }
4155         }
}
4158
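/**
 * binder_get_thread_ilocked() - look up the binder_thread for current
 * @proc:	process to search
 * @new_thread:	preallocated thread to insert if none is found, or NULL
 *
 * Walks proc->threads (keyed by pid) looking for an entry for the
 * current task. If none exists and @new_thread is supplied, the new
 * thread is initialized and inserted into the tree.
 *
 * Called with proc->inner_lock held.
 *
 * Return: the existing or newly inserted thread, or NULL if no entry
 * was found and @new_thread was NULL.
 */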
4159 static struct binder_thread *binder_get_thread_ilocked(
4160                 struct binder_proc *proc, struct binder_thread *new_thread)
4161 {
4162         struct binder_thread *thread = NULL;
4163         struct rb_node *parent = NULL;
4164         struct rb_node **p = &proc->threads.rb_node;
4165
4166         while (*p) {
4167                 parent = *p;
4168                 thread = rb_entry(parent, struct binder_thread, rb_node);
4169
4170                 if (current->pid < thread->pid)
4171                         p = &(*p)->rb_left;
4172                 else if (current->pid > thread->pid)
4173                         p = &(*p)->rb_right;
4174                 else
4175                         return thread;
4176         }
4177         if (!new_thread)
4178                 return NULL;
4179         thread = new_thread;
4180         binder_stats_created(BINDER_STAT_THREAD);
4181         thread->proc = proc;
4182         thread->pid = current->pid;
4183         atomic_set(&thread->tmp_ref, 0);
4184         init_waitqueue_head(&thread->wait);
4185         INIT_LIST_HEAD(&thread->todo);
4186         rb_link_node(&thread->rb_node, parent, p);
4187         rb_insert_color(&thread->rb_node, &proc->threads);
4188         thread->looper_need_return = true;
4189         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4190         thread->return_error.cmd = BR_OK;
4191         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4192         thread->reply_error.cmd = BR_OK;
4193         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4194         return thread;
4195 }
4196
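/**
 * binder_get_thread() - find or create the binder_thread for current
 * @proc:	process the thread belongs to
 *
 * The fast path looks up an existing thread under the inner lock. On
 * a miss, a new thread is allocated outside the lock and the lookup
 * is retried with the allocation offered for insertion; if another
 * task raced us and inserted one first, the spare allocation is
 * freed.
 *
 * Return: the thread for current, or NULL if allocation failed.
 */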
4197 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4198 {
4199         struct binder_thread *thread;
4200         struct binder_thread *new_thread;
4201
4202         binder_inner_proc_lock(proc);
4203         thread = binder_get_thread_ilocked(proc, NULL);
4204         binder_inner_proc_unlock(proc);
4205         if (!thread) {
4206                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4207                 if (new_thread == NULL)
4208                         return NULL;
4209                 binder_inner_proc_lock(proc);
4210                 thread = binder_get_thread_ilocked(proc, new_thread);
4211                 binder_inner_proc_unlock(proc);
4212                 if (thread != new_thread)
4213                         kfree(new_thread);
4214         }
4215         return thread;
4216 }
4217
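/**
 * binder_free_proc() - free all remaining state of a binder_proc
 * @proc:	process to free; its todo and delivered_death lists
 *		must already be empty
 *
 * Releases the binder allocator state and the task reference taken
 * in binder_open() before freeing the structure itself.
 */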
4218 static void binder_free_proc(struct binder_proc *proc)
4219 {
4220         BUG_ON(!list_empty(&proc->todo));
4221         BUG_ON(!list_empty(&proc->delivered_death));
4222         binder_alloc_deferred_release(&proc->alloc);
4223         put_task_struct(proc->tsk);
4224         binder_stats_deleted(BINDER_STAT_PROC);
4225         kfree(proc);
4226 }
4227
4228 static void binder_free_thread(struct binder_thread *thread)
4229 {
4230         BUG_ON(!list_empty(&thread->todo));
4231         binder_stats_deleted(BINDER_STAT_THREAD);
4232         binder_proc_dec_tmpref(thread->proc);
4233         kfree(thread);
4234 }
4235
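/**
 * binder_thread_release() - release a thread that is going away
 * @proc:	process the thread belongs to
 * @thread:	thread being released
 *
 * Removes the thread from proc->threads, marks it dead and unwinds
 * its transaction stack, detaching each transaction from the thread.
 * If an incoming transaction was waiting on this thread for a reply,
 * a BR_DEAD_REPLY is sent back to the caller, and any work still
 * queued to the thread is discarded via binder_release_work().
 *
 * Return: the number of transactions that were still active.
 */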
4236 static int binder_thread_release(struct binder_proc *proc,
4237                                  struct binder_thread *thread)
4238 {
4239         struct binder_transaction *t;
4240         struct binder_transaction *send_reply = NULL;
4241         int active_transactions = 0;
4242         struct binder_transaction *last_t = NULL;
4243
4244         binder_inner_proc_lock(thread->proc);
4245         /*
4246          * take a ref on the proc so it survives
4247          * after we remove this thread from proc->threads.
4248          * The corresponding dec is when we actually
4249          * free the thread in binder_free_thread()
4250          */
4251         proc->tmp_ref++;
4252         /*
4253          * take a ref on this thread to ensure it
4254          * survives while we are releasing it
4255          */
4256         atomic_inc(&thread->tmp_ref);
4257         rb_erase(&thread->rb_node, &proc->threads);
4258         t = thread->transaction_stack;
4259         if (t) {
4260                 spin_lock(&t->lock);
4261                 if (t->to_thread == thread)
4262                         send_reply = t;
4263         }
4264         thread->is_dead = true;
4265
4266         while (t) {
4267                 last_t = t;
4268                 active_transactions++;
4269                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4270                              "release %d:%d transaction %d %s, still active\n",
4271                               proc->pid, thread->pid,
4272                              t->debug_id,
4273                              (t->to_thread == thread) ? "in" : "out");
4274
4275                 if (t->to_thread == thread) {
4276                         t->to_proc = NULL;
4277                         t->to_thread = NULL;
4278                         if (t->buffer) {
4279                                 t->buffer->transaction = NULL;
4280                                 t->buffer = NULL;
4281                         }
4282                         t = t->to_parent;
4283                 } else if (t->from == thread) {
4284                         t->from = NULL;
4285                         t = t->from_parent;
4286                 } else
4287                         BUG();
4288                 spin_unlock(&last_t->lock);
4289                 if (t)
4290                         spin_lock(&t->lock);
4291         }
4292         binder_inner_proc_unlock(thread->proc);
4293
4294         if (send_reply)
4295                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4296         binder_release_work(proc, &thread->todo);
4297         binder_thread_dec_tmpref(thread);
4298         return active_transactions;
4299 }
4300
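/**
 * binder_poll() - poll/select support for the binder fd
 * @filp:	file for the binder instance
 * @wait:	poll table that thread->wait is registered with
 *
 * Marks the calling thread as a poll-based looper and reports POLLIN
 * when the thread has (or may handle) pending work.
 */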
4301 static unsigned int binder_poll(struct file *filp,
4302                                 struct poll_table_struct *wait)
4303 {
4304         struct binder_proc *proc = filp->private_data;
4305         struct binder_thread *thread = NULL;
4306         bool wait_for_proc_work;
4307
        thread = binder_get_thread(proc);
        if (!thread)
                return POLLERR;

4310         binder_inner_proc_lock(thread->proc);
4311         thread->looper |= BINDER_LOOPER_STATE_POLL;
4312         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4313
4314         binder_inner_proc_unlock(thread->proc);
4315
4316         poll_wait(filp, &thread->wait, wait);
4317
4318         if (binder_has_work(thread, wait_for_proc_work))
4319                 return POLLIN;
4320
4321         return 0;
4322 }
4323
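/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:	file for the binder instance
 * @cmd:	ioctl command (used only for size validation)
 * @arg:	user-space pointer to a struct binder_write_read
 * @thread:	calling binder_thread
 *
 * Copies in the binder_write_read descriptor, processes the write
 * buffer through binder_thread_write(), fills the read buffer through
 * binder_thread_read(), and copies the updated consumed counts back
 * to user space.
 *
 * A minimal sketch of the user-space side, assuming binder_fd is an
 * already-open /dev/binder descriptor (illustration only, not part of
 * this driver):
 *
 *	struct binder_write_read bwr = {};
 *	uint32_t bc = BC_ENTER_LOOPER;
 *
 *	bwr.write_buffer = (binder_uintptr_t)&bc;
 *	bwr.write_size = sizeof(bc);
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *
 * Return: 0 on success, a negative errno on failure.
 */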
4324 static int binder_ioctl_write_read(struct file *filp,
4325                                 unsigned int cmd, unsigned long arg,
4326                                 struct binder_thread *thread)
4327 {
4328         int ret = 0;
4329         struct binder_proc *proc = filp->private_data;
4330         unsigned int size = _IOC_SIZE(cmd);
4331         void __user *ubuf = (void __user *)arg;
4332         struct binder_write_read bwr;
4333
4334         if (size != sizeof(struct binder_write_read)) {
4335                 ret = -EINVAL;
4336                 goto out;
4337         }
4338         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4339                 ret = -EFAULT;
4340                 goto out;
4341         }
4342         binder_debug(BINDER_DEBUG_READ_WRITE,
4343                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4344                      proc->pid, thread->pid,
4345                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4346                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4347
4348         if (bwr.write_size > 0) {
4349                 ret = binder_thread_write(proc, thread,
4350                                           bwr.write_buffer,
4351                                           bwr.write_size,
4352                                           &bwr.write_consumed);
4353                 trace_binder_write_done(ret);
4354                 if (ret < 0) {
4355                         bwr.read_consumed = 0;
4356                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4357                                 ret = -EFAULT;
4358                         goto out;
4359                 }
4360         }
4361         if (bwr.read_size > 0) {
4362                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4363                                          bwr.read_size,
4364                                          &bwr.read_consumed,
4365                                          filp->f_flags & O_NONBLOCK);
4366                 trace_binder_read_done(ret);
4367                 binder_inner_proc_lock(proc);
4368                 if (!binder_worklist_empty_ilocked(&proc->todo))
4369                         binder_wakeup_proc_ilocked(proc);
4370                 binder_inner_proc_unlock(proc);
4371                 if (ret < 0) {
4372                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4373                                 ret = -EFAULT;
4374                         goto out;
4375                 }
4376         }
4377         binder_debug(BINDER_DEBUG_READ_WRITE,
4378                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4379                      proc->pid, thread->pid,
4380                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4381                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4382         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4383                 ret = -EFAULT;
4384                 goto out;
4385         }
4386 out:
4387         return ret;
4388 }
4389
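/**
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR
 * @filp:	file for the binder instance
 *
 * Registers the calling process as the context manager for its binder
 * context. Fails with -EBUSY if a context manager already exists,
 * checks with the security module, and enforces that only the uid
 * that first claimed the role (if any) may claim it again. On success
 * a new node is created and becomes context->binder_context_mgr_node.
 *
 * Return: 0 on success, a negative errno on failure.
 */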
4390 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4391 {
4392         int ret = 0;
4393         struct binder_proc *proc = filp->private_data;
4394         struct binder_context *context = proc->context;
4395         struct binder_node *new_node;
4396         kuid_t curr_euid = current_euid();
4397
4398         mutex_lock(&context->context_mgr_node_lock);
4399         if (context->binder_context_mgr_node) {
4400                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4401                 ret = -EBUSY;
4402                 goto out;
4403         }
4404         ret = security_binder_set_context_mgr(proc->tsk);
4405         if (ret < 0)
4406                 goto out;
4407         if (uid_valid(context->binder_context_mgr_uid)) {
4408                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4409                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4410                                from_kuid(&init_user_ns, curr_euid),
4411                                from_kuid(&init_user_ns,
4412                                          context->binder_context_mgr_uid));
4413                         ret = -EPERM;
4414                         goto out;
4415                 }
4416         } else {
4417                 context->binder_context_mgr_uid = curr_euid;
4418         }
4419         new_node = binder_new_node(proc, NULL);
4420         if (!new_node) {
4421                 ret = -ENOMEM;
4422                 goto out;
4423         }
4424         binder_node_lock(new_node);
4425         new_node->local_weak_refs++;
4426         new_node->local_strong_refs++;
4427         new_node->has_strong_ref = 1;
4428         new_node->has_weak_ref = 1;
4429         context->binder_context_mgr_node = new_node;
4430         binder_node_unlock(new_node);
4431         binder_put_node(new_node);
4432 out:
4433         mutex_unlock(&context->context_mgr_node_lock);
4434         return ret;
4435 }
4436
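/**
 * binder_ioctl_get_node_debug_info() - handle BINDER_GET_NODE_DEBUG_INFO
 * @proc:	process to query
 * @info:	in/out debug info; info->ptr selects where to resume
 *
 * Fills @info with the first node whose ptr is strictly greater than
 * the ptr passed in, so user space can walk all nodes by feeding each
 * returned ptr back in; a returned ptr of 0 means the walk is done.
 * A sketch of that loop, assuming binder_fd is an open binder
 * descriptor (illustrative only):
 *
 *	struct binder_node_debug_info info = {};
 *
 *	for (;;) {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (!info.ptr)
 *			break;
 *		\/\* info.ptr and info.cookie describe the next node \*\/
 *	}
 *
 * Return: 0 (the lookup itself cannot fail).
 */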
4437 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4438                                 struct binder_node_debug_info *info)
4439 {
4440         struct rb_node *n;
4441         binder_uintptr_t ptr = info->ptr;
4442
4443         memset(info, 0, sizeof(*info));
4444
4445         binder_inner_proc_lock(proc);
4446         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4447                 struct binder_node *node = rb_entry(n, struct binder_node,
4448                                                     rb_node);
4449                 if (node->ptr > ptr) {
4450                         info->ptr = node->ptr;
4451                         info->cookie = node->cookie;
4452                         info->has_strong_ref = node->has_strong_ref;
4453                         info->has_weak_ref = node->has_weak_ref;
4454                         break;
4455                 }
4456         }
4457         binder_inner_proc_unlock(proc);
4458
4459         return 0;
4460 }
4461
4462 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4463 {
4464         int ret;
4465         struct binder_proc *proc = filp->private_data;
4466         struct binder_thread *thread;
4467         unsigned int size = _IOC_SIZE(cmd);
4468         void __user *ubuf = (void __user *)arg;
4469
4470         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4471                         proc->pid, current->pid, cmd, arg);*/
4472
4473         binder_selftest_alloc(&proc->alloc);
4474
4475         trace_binder_ioctl(cmd, arg);
4476
4477         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4478         if (ret)
4479                 goto err_unlocked;
4480
4481         thread = binder_get_thread(proc);
4482         if (thread == NULL) {
4483                 ret = -ENOMEM;
4484                 goto err;
4485         }
4486
4487         switch (cmd) {
4488         case BINDER_WRITE_READ:
4489                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4490                 if (ret)
4491                         goto err;
4492                 break;
4493         case BINDER_SET_MAX_THREADS: {
4494                 int max_threads;
4495
4496                 if (copy_from_user(&max_threads, ubuf,
4497                                    sizeof(max_threads))) {
4498                         ret = -EINVAL;
4499                         goto err;
4500                 }
4501                 binder_inner_proc_lock(proc);
4502                 proc->max_threads = max_threads;
4503                 binder_inner_proc_unlock(proc);
4504                 break;
4505         }
4506         case BINDER_SET_CONTEXT_MGR:
4507                 ret = binder_ioctl_set_ctx_mgr(filp);
4508                 if (ret)
4509                         goto err;
4510                 break;
4511         case BINDER_THREAD_EXIT:
4512                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4513                              proc->pid, thread->pid);
4514                 binder_thread_release(proc, thread);
4515                 thread = NULL;
4516                 break;
4517         case BINDER_VERSION: {
4518                 struct binder_version __user *ver = ubuf;
4519
4520                 if (size != sizeof(struct binder_version)) {
4521                         ret = -EINVAL;
4522                         goto err;
4523                 }
4524                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4525                              &ver->protocol_version)) {
4526                         ret = -EINVAL;
4527                         goto err;
4528                 }
4529                 break;
4530         }
4531         case BINDER_GET_NODE_DEBUG_INFO: {
4532                 struct binder_node_debug_info info;
4533
4534                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4535                         ret = -EFAULT;
4536                         goto err;
4537                 }
4538
4539                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4540                 if (ret < 0)
4541                         goto err;
4542
4543                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4544                         ret = -EFAULT;
4545                         goto err;
4546                 }
4547                 break;
4548         }
4549         default:
4550                 ret = -EINVAL;
4551                 goto err;
4552         }
4553         ret = 0;
4554 err:
4555         if (thread)
4556                 thread->looper_need_return = false;
4557         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4558         if (ret && ret != -ERESTARTSYS)
4559                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4560 err_unlocked:
4561         trace_binder_ioctl_done(ret);
4562         return ret;
4563 }
4564
4565 static void binder_vma_open(struct vm_area_struct *vma)
4566 {
4567         struct binder_proc *proc = vma->vm_private_data;
4568
4569         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4570                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4571                      proc->pid, vma->vm_start, vma->vm_end,
4572                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4573                      (unsigned long)pgprot_val(vma->vm_page_prot));
4574 }
4575
4576 static void binder_vma_close(struct vm_area_struct *vma)
4577 {
4578         struct binder_proc *proc = vma->vm_private_data;
4579
4580         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4581                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4582                      proc->pid, vma->vm_start, vma->vm_end,
4583                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4584                      (unsigned long)pgprot_val(vma->vm_page_prot));
4585         binder_alloc_vma_close(&proc->alloc);
4586         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4587 }
4588
4589 static int binder_vm_fault(struct vm_fault *vmf)
4590 {
4591         return VM_FAULT_SIGBUS;
4592 }
4593
4594 static const struct vm_operations_struct binder_vm_ops = {
4595         .open = binder_vma_open,
4596         .close = binder_vma_close,
4597         .fault = binder_vm_fault,
4598 };
4599
4600 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4601 {
4602         int ret;
4603         struct binder_proc *proc = filp->private_data;
4604         const char *failure_string;
4605
4606         if (proc->tsk != current->group_leader)
4607                 return -EINVAL;
4608
4609         if ((vma->vm_end - vma->vm_start) > SZ_4M)
4610                 vma->vm_end = vma->vm_start + SZ_4M;
4611
4612         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4613                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4614                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4615                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4616                      (unsigned long)pgprot_val(vma->vm_page_prot));
4617
4618         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4619                 ret = -EPERM;
4620                 failure_string = "bad vm_flags";
4621                 goto err_bad_arg;
4622         }
4623         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4624         vma->vm_ops = &binder_vm_ops;
4625         vma->vm_private_data = proc;
4626
4627         ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4628         if (ret)
4629                 return ret;
4630         proc->files = get_files_struct(current);
4631         return 0;
4632
4633 err_bad_arg:
4634         pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4635                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4636         return ret;
4637 }
4638
4639 static int binder_open(struct inode *nodp, struct file *filp)
4640 {
4641         struct binder_proc *proc;
4642         struct binder_device *binder_dev;
4643
4644         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4645                      current->group_leader->pid, current->pid);
4646
4647         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4648         if (proc == NULL)
4649                 return -ENOMEM;
4650         spin_lock_init(&proc->inner_lock);
4651         spin_lock_init(&proc->outer_lock);
4652         get_task_struct(current->group_leader);
4653         proc->tsk = current->group_leader;
4654         INIT_LIST_HEAD(&proc->todo);
4655         proc->default_priority = task_nice(current);
4656         binder_dev = container_of(filp->private_data, struct binder_device,
4657                                   miscdev);
4658         proc->context = &binder_dev->context;
4659         binder_alloc_init(&proc->alloc);
4660
4661         binder_stats_created(BINDER_STAT_PROC);
4662         proc->pid = current->group_leader->pid;
4663         INIT_LIST_HEAD(&proc->delivered_death);
4664         INIT_LIST_HEAD(&proc->waiting_threads);
4665         filp->private_data = proc;
4666
4667         mutex_lock(&binder_procs_lock);
4668         hlist_add_head(&proc->proc_node, &binder_procs);
4669         mutex_unlock(&binder_procs_lock);
4670
4671         if (binder_debugfs_dir_entry_proc) {
4672                 char strbuf[11];
4673
4674                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4675                 /*
4676                  * proc debug entries are shared between contexts, so
4677                  * this will fail if the process tries to open the driver
                 * again with a different context. The printing code will
4679                  * anyway print all contexts that a given PID has, so this
4680                  * is not a problem.
4681                  */
4682                 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4683                         binder_debugfs_dir_entry_proc,
4684                         (void *)(unsigned long)proc->pid,
4685                         &binder_proc_fops);
4686         }
4687
4688         return 0;
4689 }
4690
4691 static int binder_flush(struct file *filp, fl_owner_t id)
4692 {
4693         struct binder_proc *proc = filp->private_data;
4694
4695         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4696
4697         return 0;
4698 }
4699
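/**
 * binder_deferred_flush() - force all threads back to user space
 * @proc:	process being flushed
 *
 * Sets looper_need_return on every thread of @proc and wakes the
 * ones currently blocked in binder_thread_read(), so a flush (e.g.
 * on close) is not held up by sleeping readers.
 */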
4700 static void binder_deferred_flush(struct binder_proc *proc)
4701 {
4702         struct rb_node *n;
4703         int wake_count = 0;
4704
4705         binder_inner_proc_lock(proc);
4706         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4707                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4708
4709                 thread->looper_need_return = true;
4710                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4711                         wake_up_interruptible(&thread->wait);
4712                         wake_count++;
4713                 }
4714         }
4715         binder_inner_proc_unlock(proc);
4716
4717         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4718                      "binder_flush: %d woke %d threads\n", proc->pid,
4719                      wake_count);
4720 }
4721
4722 static int binder_release(struct inode *nodp, struct file *filp)
4723 {
4724         struct binder_proc *proc = filp->private_data;
4725
4726         debugfs_remove(proc->debugfs_entry);
4727         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4728
4729         return 0;
4730 }
4731
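/**
 * binder_node_release() - release a node whose process is going away
 * @node:	node to release; the caller must hold a temporary ref
 * @refs:	running count of refs released so far
 *
 * If nothing else references the node it is freed immediately.
 * Otherwise it is moved onto binder_dead_nodes and a
 * BINDER_WORK_DEAD_BINDER item is queued to every process that
 * requested a death notification for it.
 *
 * Return: the updated count of refs that pointed at the node.
 */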
4732 static int binder_node_release(struct binder_node *node, int refs)
4733 {
4734         struct binder_ref *ref;
4735         int death = 0;
4736         struct binder_proc *proc = node->proc;
4737
4738         binder_release_work(proc, &node->async_todo);
4739
4740         binder_node_lock(node);
4741         binder_inner_proc_lock(proc);
4742         binder_dequeue_work_ilocked(&node->work);
4743         /*
         * The caller must have taken a temporary ref on the node.
4745          */
4746         BUG_ON(!node->tmp_refs);
4747         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4748                 binder_inner_proc_unlock(proc);
4749                 binder_node_unlock(node);
4750                 binder_free_node(node);
4751
4752                 return refs;
4753         }
4754
4755         node->proc = NULL;
4756         node->local_strong_refs = 0;
4757         node->local_weak_refs = 0;
4758         binder_inner_proc_unlock(proc);
4759
4760         spin_lock(&binder_dead_nodes_lock);
4761         hlist_add_head(&node->dead_node, &binder_dead_nodes);
4762         spin_unlock(&binder_dead_nodes_lock);
4763
4764         hlist_for_each_entry(ref, &node->refs, node_entry) {
4765                 refs++;
4766                 /*
4767                  * Need the node lock to synchronize
4768                  * with new notification requests and the
4769                  * inner lock to synchronize with queued
4770                  * death notifications.
4771                  */
4772                 binder_inner_proc_lock(ref->proc);
4773                 if (!ref->death) {
4774                         binder_inner_proc_unlock(ref->proc);
4775                         continue;
4776                 }
4777
4778                 death++;
4779
4780                 BUG_ON(!list_empty(&ref->death->work.entry));
4781                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4782                 binder_enqueue_work_ilocked(&ref->death->work,
4783                                             &ref->proc->todo);
4784                 binder_wakeup_proc_ilocked(ref->proc);
4785                 binder_inner_proc_unlock(ref->proc);
4786         }
4787
4788         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4789                      "node %d now dead, refs %d, death %d\n",
4790                      node->debug_id, refs, death);
4791         binder_node_unlock(node);
4792         binder_put_node(node);
4793
4794         return refs;
4795 }
4796
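/**
 * binder_deferred_release() - tear down a binder_proc after release
 * @proc:	process being destroyed
 *
 * Runs from the deferred workqueue once the fd has been released.
 * Unregisters the process (clearing the context manager node if it
 * owned it), releases every thread, node and ref, and drains the
 * remaining work lists before dropping its temporary reference on
 * @proc.
 */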
4797 static void binder_deferred_release(struct binder_proc *proc)
4798 {
4799         struct binder_context *context = proc->context;
4800         struct rb_node *n;
4801         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4802
4803         BUG_ON(proc->files);
4804
4805         mutex_lock(&binder_procs_lock);
4806         hlist_del(&proc->proc_node);
4807         mutex_unlock(&binder_procs_lock);
4808
4809         mutex_lock(&context->context_mgr_node_lock);
4810         if (context->binder_context_mgr_node &&
4811             context->binder_context_mgr_node->proc == proc) {
4812                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4813                              "%s: %d context_mgr_node gone\n",
4814                              __func__, proc->pid);
4815                 context->binder_context_mgr_node = NULL;
4816         }
4817         mutex_unlock(&context->context_mgr_node_lock);
4818         binder_inner_proc_lock(proc);
4819         /*
4820          * Make sure proc stays alive after we
4821          * remove all the threads
4822          */
4823         proc->tmp_ref++;
4824
4825         proc->is_dead = true;
4826         threads = 0;
4827         active_transactions = 0;
4828         while ((n = rb_first(&proc->threads))) {
4829                 struct binder_thread *thread;
4830
4831                 thread = rb_entry(n, struct binder_thread, rb_node);
4832                 binder_inner_proc_unlock(proc);
4833                 threads++;
4834                 active_transactions += binder_thread_release(proc, thread);
4835                 binder_inner_proc_lock(proc);
4836         }
4837
4838         nodes = 0;
4839         incoming_refs = 0;
4840         while ((n = rb_first(&proc->nodes))) {
4841                 struct binder_node *node;
4842
4843                 node = rb_entry(n, struct binder_node, rb_node);
4844                 nodes++;
4845                 /*
4846                  * take a temporary ref on the node before
4847                  * calling binder_node_release() which will either
4848                  * kfree() the node or call binder_put_node()
4849                  */
4850                 binder_inc_node_tmpref_ilocked(node);
4851                 rb_erase(&node->rb_node, &proc->nodes);
4852                 binder_inner_proc_unlock(proc);
4853                 incoming_refs = binder_node_release(node, incoming_refs);
4854                 binder_inner_proc_lock(proc);
4855         }
4856         binder_inner_proc_unlock(proc);
4857
4858         outgoing_refs = 0;
4859         binder_proc_lock(proc);
4860         while ((n = rb_first(&proc->refs_by_desc))) {
4861                 struct binder_ref *ref;
4862
4863                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4864                 outgoing_refs++;
4865                 binder_cleanup_ref_olocked(ref);
4866                 binder_proc_unlock(proc);
4867                 binder_free_ref(ref);
4868                 binder_proc_lock(proc);
4869         }
4870         binder_proc_unlock(proc);
4871
4872         binder_release_work(proc, &proc->todo);
4873         binder_release_work(proc, &proc->delivered_death);
4874
4875         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4876                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4877                      __func__, proc->pid, threads, nodes, incoming_refs,
4878                      outgoing_refs, active_transactions);
4879
4880         binder_proc_dec_tmpref(proc);
4881 }
4882
4883 static void binder_deferred_func(struct work_struct *work)
4884 {
4885         struct binder_proc *proc;
4886         struct files_struct *files;
        int defer;
4889
4890         do {
4891                 mutex_lock(&binder_deferred_lock);
4892                 if (!hlist_empty(&binder_deferred_list)) {
4893                         proc = hlist_entry(binder_deferred_list.first,
4894                                         struct binder_proc, deferred_work_node);
4895                         hlist_del_init(&proc->deferred_work_node);
4896                         defer = proc->deferred_work;
4897                         proc->deferred_work = 0;
4898                 } else {
4899                         proc = NULL;
4900                         defer = 0;
4901                 }
4902                 mutex_unlock(&binder_deferred_lock);
4903
4904                 files = NULL;
4905                 if (defer & BINDER_DEFERRED_PUT_FILES) {
4906                         files = proc->files;
4907                         if (files)
4908                                 proc->files = NULL;
4909                 }
4910
4911                 if (defer & BINDER_DEFERRED_FLUSH)
4912                         binder_deferred_flush(proc);
4913
4914                 if (defer & BINDER_DEFERRED_RELEASE)
4915                         binder_deferred_release(proc); /* frees proc */
4916
4917                 if (files)
4918                         put_files_struct(files);
4919         } while (proc);
4920 }
4921 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4922
4923 static void
4924 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4925 {
4926         mutex_lock(&binder_deferred_lock);
4927         proc->deferred_work |= defer;
4928         if (hlist_unhashed(&proc->deferred_work_node)) {
4929                 hlist_add_head(&proc->deferred_work_node,
4930                                 &binder_deferred_list);
4931                 schedule_work(&binder_deferred_work);
4932         }
4933         mutex_unlock(&binder_deferred_lock);
4934 }
4935
4936 static void print_binder_transaction_ilocked(struct seq_file *m,
4937                                              struct binder_proc *proc,
4938                                              const char *prefix,
4939                                              struct binder_transaction *t)
4940 {
4941         struct binder_proc *to_proc;
4942         struct binder_buffer *buffer = t->buffer;
4943
4944         spin_lock(&t->lock);
4945         to_proc = t->to_proc;
4946         seq_printf(m,
4947                    "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4948                    prefix, t->debug_id, t,
4949                    t->from ? t->from->proc->pid : 0,
4950                    t->from ? t->from->pid : 0,
4951                    to_proc ? to_proc->pid : 0,
4952                    t->to_thread ? t->to_thread->pid : 0,
4953                    t->code, t->flags, t->priority, t->need_reply);
4954         spin_unlock(&t->lock);
4955
4956         if (proc != to_proc) {
4957                 /*
4958                  * Can only safely deref buffer if we are holding the
4959                  * correct proc inner lock for this node
4960                  */
4961                 seq_puts(m, "\n");
4962                 return;
4963         }
4964
4965         if (buffer == NULL) {
4966                 seq_puts(m, " buffer free\n");
4967                 return;
4968         }
4969         if (buffer->target_node)
4970                 seq_printf(m, " node %d", buffer->target_node->debug_id);
4971         seq_printf(m, " size %zd:%zd data %p\n",
4972                    buffer->data_size, buffer->offsets_size,
4973                    buffer->data);
4974 }
4975
4976 static void print_binder_work_ilocked(struct seq_file *m,
4977                                      struct binder_proc *proc,
4978                                      const char *prefix,
4979                                      const char *transaction_prefix,
4980                                      struct binder_work *w)
4981 {
4982         struct binder_node *node;
4983         struct binder_transaction *t;
4984
4985         switch (w->type) {
4986         case BINDER_WORK_TRANSACTION:
4987                 t = container_of(w, struct binder_transaction, work);
4988                 print_binder_transaction_ilocked(
4989                                 m, proc, transaction_prefix, t);
4990                 break;
4991         case BINDER_WORK_RETURN_ERROR: {
4992                 struct binder_error *e = container_of(
4993                                 w, struct binder_error, work);
4994
4995                 seq_printf(m, "%stransaction error: %u\n",
4996                            prefix, e->cmd);
4997         } break;
4998         case BINDER_WORK_TRANSACTION_COMPLETE:
4999                 seq_printf(m, "%stransaction complete\n", prefix);
5000                 break;
5001         case BINDER_WORK_NODE:
5002                 node = container_of(w, struct binder_node, work);
5003                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5004                            prefix, node->debug_id,
5005                            (u64)node->ptr, (u64)node->cookie);
5006                 break;
5007         case BINDER_WORK_DEAD_BINDER:
5008                 seq_printf(m, "%shas dead binder\n", prefix);
5009                 break;
5010         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5011                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5012                 break;
5013         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5014                 seq_printf(m, "%shas cleared death notification\n", prefix);
5015                 break;
5016         default:
5017                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5018                 break;
5019         }
5020 }
5021
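/*
 * Print a thread's transaction stack and pending work.  Called with
 * proc->inner_lock held.  The start_pos/header_pos bookkeeping lets
 * us rewind m->count and drop the header again if @print_always is
 * false and nothing was printed beyond the header.
 */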
5022 static void print_binder_thread_ilocked(struct seq_file *m,
5023                                         struct binder_thread *thread,
5024                                         int print_always)
5025 {
5026         struct binder_transaction *t;
5027         struct binder_work *w;
5028         size_t start_pos = m->count;
5029         size_t header_pos;
5030
5031         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5032                         thread->pid, thread->looper,
5033                         thread->looper_need_return,
5034                         atomic_read(&thread->tmp_ref));
5035         header_pos = m->count;
5036         t = thread->transaction_stack;
5037         while (t) {
5038                 if (t->from == thread) {
5039                         print_binder_transaction_ilocked(m, thread->proc,
5040                                         "    outgoing transaction", t);
5041                         t = t->from_parent;
5042                 } else if (t->to_thread == thread) {
5043                         print_binder_transaction_ilocked(m, thread->proc,
5044                                                  "    incoming transaction", t);
5045                         t = t->to_parent;
5046                 } else {
5047                         print_binder_transaction_ilocked(m, thread->proc,
5048                                         "    bad transaction", t);
5049                         t = NULL;
5050                 }
5051         }
5052         list_for_each_entry(w, &thread->todo, entry) {
5053                 print_binder_work_ilocked(m, thread->proc, "    ",
5054                                           "    pending transaction", w);
5055         }
5056         if (!print_always && m->count == header_pos)
5057                 m->count = start_pos;
5058 }
5059
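/*
 * Print one node.  Per the _nilocked suffix, the caller must hold
 * node->lock and, when node->proc is non-NULL, that proc's inner
 * lock as well (the async_todo list is inner-lock protected).  A
 * dead node has no proc, so the node lock alone suffices for it.
 */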
5060 static void print_binder_node_nilocked(struct seq_file *m,
5061                                        struct binder_node *node)
5062 {
5063         struct binder_ref *ref;
5064         struct binder_work *w;
5065         int count;
5066
5067         count = 0;
5068         hlist_for_each_entry(ref, &node->refs, node_entry)
5069                 count++;
5070
5071         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5072                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5073                    node->has_strong_ref, node->has_weak_ref,
5074                    node->local_strong_refs, node->local_weak_refs,
5075                    node->internal_strong_refs, count, node->tmp_refs);
5076         if (count) {
5077                 seq_puts(m, " proc");
5078                 hlist_for_each_entry(ref, &node->refs, node_entry)
5079                         seq_printf(m, " %d", ref->proc->pid);
5080         }
5081         seq_puts(m, "\n");
5082         if (node->proc) {
5083                 list_for_each_entry(w, &node->async_todo, entry)
5084                         print_binder_work_ilocked(m, node->proc, "    ",
5085                                           "    pending async transaction", w);
5086         }
5087 }
5088
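/*
 * Print one ref.  Called with the owning proc->outer_lock held; the
 * node lock is taken here so that ref->node->proc can be read safely.
 */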
5089 static void print_binder_ref_olocked(struct seq_file *m,
5090                                      struct binder_ref *ref)
5091 {
5092         binder_node_lock(ref->node);
5093         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5094                    ref->data.debug_id, ref->data.desc,
5095                    ref->node->proc ? "" : "dead ",
5096                    ref->node->debug_id, ref->data.strong,
5097                    ref->data.weak, ref->death);
5098         binder_node_unlock(ref->node);
5099 }
5100
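/*
 * Print everything about a proc.  The lock order (node->lock before
 * proc->inner_lock) forbids locking a node while the inner lock is
 * held, so for each node we take a temporary reference, drop the
 * inner lock, lock and print the node, then re-take the inner lock
 * to continue the walk.  The temporary reference keeps the node (and
 * its rb_node linkage) alive across the unlocked window.
 */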
5101 static void print_binder_proc(struct seq_file *m,
5102                               struct binder_proc *proc, int print_all)
5103 {
5104         struct binder_work *w;
5105         struct rb_node *n;
5106         size_t start_pos = m->count;
5107         size_t header_pos;
5108         struct binder_node *last_node = NULL;
5109
5110         seq_printf(m, "proc %d\n", proc->pid);
5111         seq_printf(m, "context %s\n", proc->context->name);
5112         header_pos = m->count;
5113
5114         binder_inner_proc_lock(proc);
5115         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5116                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5117                                                 rb_node), print_all);
5118
5119         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5120                 struct binder_node *node = rb_entry(n, struct binder_node,
5121                                                     rb_node);
5122                 /*
5123                  * take a temporary reference on the node so it
5124                  * survives and isn't removed from the tree
5125                  * while we print it.
5126                  */
5127                 binder_inc_node_tmpref_ilocked(node);
5128                 /* Need to drop inner lock to take node lock */
5129                 binder_inner_proc_unlock(proc);
5130                 if (last_node)
5131                         binder_put_node(last_node);
5132                 binder_node_inner_lock(node);
5133                 print_binder_node_nilocked(m, node);
5134                 binder_node_inner_unlock(node);
5135                 last_node = node;
5136                 binder_inner_proc_lock(proc);
5137         }
5138         binder_inner_proc_unlock(proc);
5139         if (last_node)
5140                 binder_put_node(last_node);
5141
5142         if (print_all) {
5143                 binder_proc_lock(proc);
5144                 for (n = rb_first(&proc->refs_by_desc);
5145                      n != NULL;
5146                      n = rb_next(n))
5147                         print_binder_ref_olocked(m, rb_entry(n,
5148                                                             struct binder_ref,
5149                                                             rb_node_desc));
5150                 binder_proc_unlock(proc);
5151         }
5152         binder_alloc_print_allocated(m, &proc->alloc);
5153         binder_inner_proc_lock(proc);
5154         list_for_each_entry(w, &proc->todo, entry)
5155                 print_binder_work_ilocked(m, proc, "  ",
5156                                           "  pending transaction", w);
5157         list_for_each_entry(w, &proc->delivered_death, entry) {
5158                 seq_puts(m, "  has delivered dead binder\n");
5159                 break;
5160         }
5161         binder_inner_proc_unlock(proc);
5162         if (!print_all && m->count == header_pos)
5163                 m->count = start_pos;
5164 }
5165
5166 static const char * const binder_return_strings[] = {
5167         "BR_ERROR",
5168         "BR_OK",
5169         "BR_TRANSACTION",
5170         "BR_REPLY",
5171         "BR_ACQUIRE_RESULT",
5172         "BR_DEAD_REPLY",
5173         "BR_TRANSACTION_COMPLETE",
5174         "BR_INCREFS",
5175         "BR_ACQUIRE",
5176         "BR_RELEASE",
5177         "BR_DECREFS",
5178         "BR_ATTEMPT_ACQUIRE",
5179         "BR_NOOP",
5180         "BR_SPAWN_LOOPER",
5181         "BR_FINISHED",
5182         "BR_DEAD_BINDER",
5183         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5184         "BR_FAILED_REPLY"
5185 };
5186
5187 static const char * const binder_command_strings[] = {
5188         "BC_TRANSACTION",
5189         "BC_REPLY",
5190         "BC_ACQUIRE_RESULT",
5191         "BC_FREE_BUFFER",
5192         "BC_INCREFS",
5193         "BC_ACQUIRE",
5194         "BC_RELEASE",
5195         "BC_DECREFS",
5196         "BC_INCREFS_DONE",
5197         "BC_ACQUIRE_DONE",
5198         "BC_ATTEMPT_ACQUIRE",
5199         "BC_REGISTER_LOOPER",
5200         "BC_ENTER_LOOPER",
5201         "BC_EXIT_LOOPER",
5202         "BC_REQUEST_DEATH_NOTIFICATION",
5203         "BC_CLEAR_DEATH_NOTIFICATION",
5204         "BC_DEAD_BINDER_DONE",
5205         "BC_TRANSACTION_SG",
5206         "BC_REPLY_SG",
5207 };
5208
5209 static const char * const binder_objstat_strings[] = {
5210         "proc",
5211         "thread",
5212         "node",
5213         "ref",
5214         "death",
5215         "transaction",
5216         "transaction_complete"
5217 };
5218
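/*
 * Print all non-zero command, return and object counters.  The
 * BUILD_BUG_ON()s keep the string tables above in sync with the
 * counter arrays in struct binder_stats: a size mismatch breaks the
 * build instead of printing mislabelled statistics.
 */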
5219 static void print_binder_stats(struct seq_file *m, const char *prefix,
5220                                struct binder_stats *stats)
5221 {
5222         int i;
5223
5224         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5225                      ARRAY_SIZE(binder_command_strings));
5226         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5227                 int temp = atomic_read(&stats->bc[i]);
5228
5229                 if (temp)
5230                         seq_printf(m, "%s%s: %d\n", prefix,
5231                                    binder_command_strings[i], temp);
5232         }
5233
5234         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5235                      ARRAY_SIZE(binder_return_strings));
5236         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5237                 int temp = atomic_read(&stats->br[i]);
5238
5239                 if (temp)
5240                         seq_printf(m, "%s%s: %d\n", prefix,
5241                                    binder_return_strings[i], temp);
5242         }
5243
5244         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5245                      ARRAY_SIZE(binder_objstat_strings));
5246         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5247                      ARRAY_SIZE(stats->obj_deleted));
5248         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5249                 int created = atomic_read(&stats->obj_created[i]);
5250                 int deleted = atomic_read(&stats->obj_deleted[i]);
5251
5252                 if (created || deleted)
5253                         seq_printf(m, "%s%s: active %d total %d\n",
5254                                 prefix,
5255                                 binder_objstat_strings[i],
5256                                 created - deleted,
5257                                 created);
5258         }
5259 }
5260
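/*
 * Print per-proc summary counters for the debugfs "stats" file:
 * thread, node, ref and buffer counts, allocator state, pending
 * transactions, and the proc's own binder_stats.
 */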
5261 static void print_binder_proc_stats(struct seq_file *m,
5262                                     struct binder_proc *proc)
5263 {
5264         struct binder_work *w;
5265         struct binder_thread *thread;
5266         struct rb_node *n;
5267         int count, strong, weak, ready_threads;
5268         size_t free_async_space =
5269                 binder_alloc_get_free_async_space(&proc->alloc);
5270
5271         seq_printf(m, "proc %d\n", proc->pid);
5272         seq_printf(m, "context %s\n", proc->context->name);
5273         count = 0;
5274         ready_threads = 0;
5275         binder_inner_proc_lock(proc);
5276         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5277                 count++;
5278
5279         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5280                 ready_threads++;
5281
5282         seq_printf(m, "  threads: %d\n", count);
5283         seq_printf(m, "  requested threads: %d+%d/%d\n"
5284                         "  ready threads %d\n"
5285                         "  free async space %zd\n", proc->requested_threads,
5286                         proc->requested_threads_started, proc->max_threads,
5287                         ready_threads,
5288                         free_async_space);
5289         count = 0;
5290         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5291                 count++;
5292         binder_inner_proc_unlock(proc);
5293         seq_printf(m, "  nodes: %d\n", count);
5294         count = 0;
5295         strong = 0;
5296         weak = 0;
5297         binder_proc_lock(proc);
5298         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5299                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5300                                                   rb_node_desc);
5301                 count++;
5302                 strong += ref->data.strong;
5303                 weak += ref->data.weak;
5304         }
5305         binder_proc_unlock(proc);
5306         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5307
5308         count = binder_alloc_get_allocated_count(&proc->alloc);
5309         seq_printf(m, "  buffers: %d\n", count);
5310
5311         binder_alloc_print_pages(m, &proc->alloc);
5312
5313         count = 0;
5314         binder_inner_proc_lock(proc);
5315         list_for_each_entry(w, &proc->todo, entry) {
5316                 if (w->type == BINDER_WORK_TRANSACTION)
5317                         count++;
5318         }
5319         binder_inner_proc_unlock(proc);
5320         seq_printf(m, "  pending transactions: %d\n", count);
5321
5322         print_binder_stats(m, "  ", &proc->stats);
5323 }
5324
5325
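/*
 * Implements the debugfs "state" file.  Dead nodes are printed
 * first; tmp_refs is bumped directly (rather than through
 * binder_inc_node_tmpref()) because a dead node has no proc and its
 * tmp_refs is protected by binder_dead_nodes_lock, held here.
 */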
5326 static int binder_state_show(struct seq_file *m, void *unused)
5327 {
5328         struct binder_proc *proc;
5329         struct binder_node *node;
5330         struct binder_node *last_node = NULL;
5331
5332         seq_puts(m, "binder state:\n");
5333
5334         spin_lock(&binder_dead_nodes_lock);
5335         if (!hlist_empty(&binder_dead_nodes))
5336                 seq_puts(m, "dead nodes:\n");
5337         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5338                 /*
5339                  * take a temporary reference on the node so it
5340                  * survives and isn't removed from the list
5341                  * while we print it.
5342                  */
5343                 node->tmp_refs++;
5344                 spin_unlock(&binder_dead_nodes_lock);
5345                 if (last_node)
5346                         binder_put_node(last_node);
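                /*
                 * print_binder_node_nilocked() normally also requires
                 * the proc inner lock, but a dead node has no proc,
                 * so the node lock alone is sufficient here.
                 */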
5347                 binder_node_lock(node);
5348                 print_binder_node_nilocked(m, node);
5349                 binder_node_unlock(node);
5350                 last_node = node;
5351                 spin_lock(&binder_dead_nodes_lock);
5352         }
5353         spin_unlock(&binder_dead_nodes_lock);
5354         if (last_node)
5355                 binder_put_node(last_node);
5356
5357         mutex_lock(&binder_procs_lock);
5358         hlist_for_each_entry(proc, &binder_procs, proc_node)
5359                 print_binder_proc(m, proc, 1);
5360         mutex_unlock(&binder_procs_lock);
5361
5362         return 0;
5363 }
5364
5365 static int binder_stats_show(struct seq_file *m, void *unused)
5366 {
5367         struct binder_proc *proc;
5368
5369         seq_puts(m, "binder stats:\n");
5370
5371         print_binder_stats(m, "", &binder_stats);
5372
5373         mutex_lock(&binder_procs_lock);
5374         hlist_for_each_entry(proc, &binder_procs, proc_node)
5375                 print_binder_proc_stats(m, proc);
5376         mutex_unlock(&binder_procs_lock);
5377
5378         return 0;
5379 }
5380
5381 static int binder_transactions_show(struct seq_file *m, void *unused)
5382 {
5383         struct binder_proc *proc;
5384
5385         seq_puts(m, "binder transactions:\n");
5386         mutex_lock(&binder_procs_lock);
5387         hlist_for_each_entry(proc, &binder_procs, proc_node)
5388                 print_binder_proc(m, proc, 0);
5389         mutex_unlock(&binder_procs_lock);
5390
5391         return 0;
5392 }
5393
5394 static int binder_proc_show(struct seq_file *m, void *unused)
5395 {
5396         struct binder_proc *itr;
5397         int pid = (unsigned long)m->private;
5398
5399         mutex_lock(&binder_procs_lock);
5400         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5401                 if (itr->pid == pid) {
5402                         seq_puts(m, "binder proc state:\n");
5403                         print_binder_proc(m, itr, 1);
5404                 }
5405         }
5406         mutex_unlock(&binder_procs_lock);
5407
5408         return 0;
5409 }
5410
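/*
 * Print one transaction log entry.  Entries are written without
 * locking, so debug_id_done is sampled before and after printing,
 * with read barriers pairing the writer's write barriers; if it
 * changed (or was never set), the entry raced with a writer and is
 * flagged "(incomplete)".
 */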
5411 static void print_binder_transaction_log_entry(struct seq_file *m,
5412                                         struct binder_transaction_log_entry *e)
5413 {
5414         int debug_id = READ_ONCE(e->debug_id_done);
5415         /*
5416          * read barrier to guarantee debug_id_done is read before
5417          * we print the log values
5418          */
5419         smp_rmb();
5420         seq_printf(m,
5421                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5422                    e->debug_id, (e->call_type == 2) ? "reply" :
5423                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5424                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5425                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5426                    e->return_error, e->return_error_param,
5427                    e->return_error_line);
5428         /*
5429          * read barrier to guarantee debug_id_done is re-read only
5430          * after all fields of the entry have been printed
5431          */
5432         smp_rmb();
5433         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5434                         "\n" : " (incomplete)\n");
5435 }
5436
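/*
 * Dump a transaction log ring buffer, oldest entry first.  log->cur
 * is the monotonically increasing index of the most recent entry;
 * until the buffer first fills, printing starts at slot 0, after
 * that at the slot just past the newest entry.
 */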
5437 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5438 {
5439         struct binder_transaction_log *log = m->private;
5440         unsigned int log_cur = atomic_read(&log->cur);
5441         unsigned int count;
5442         unsigned int cur;
5443         int i;
5444
5445         count = log_cur + 1;
5446         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5447                 0 : count % ARRAY_SIZE(log->entry);
5448         if (count > ARRAY_SIZE(log->entry) || log->full)
5449                 count = ARRAY_SIZE(log->entry);
5450         for (i = 0; i < count; i++) {
5451                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5452
5453                 print_binder_transaction_log_entry(m, &log->entry[index]);
5454         }
5455         return 0;
5456 }
5457
5458 static const struct file_operations binder_fops = {
5459         .owner = THIS_MODULE,
5460         .poll = binder_poll,
5461         .unlocked_ioctl = binder_ioctl,
5462         .compat_ioctl = binder_ioctl,
5463         .mmap = binder_mmap,
5464         .open = binder_open,
5465         .flush = binder_flush,
5466         .release = binder_release,
5467 };
5468
5469 BINDER_DEBUG_ENTRY(state);
5470 BINDER_DEBUG_ENTRY(stats);
5471 BINDER_DEBUG_ENTRY(transactions);
5472 BINDER_DEBUG_ENTRY(transaction_log);
5473
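/*
 * Allocate and register one binder misc device (e.g. /dev/binder)
 * with its own binder_context, and add it to the global
 * binder_devices list.
 */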
5474 static int __init init_binder_device(const char *name)
5475 {
5476         int ret;
5477         struct binder_device *binder_device;
5478
5479         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5480         if (!binder_device)
5481                 return -ENOMEM;
5482
5483         binder_device->miscdev.fops = &binder_fops;
5484         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5485         binder_device->miscdev.name = name;
5486
5487         binder_device->context.binder_context_mgr_uid = INVALID_UID;
5488         binder_device->context.name = name;
5489         mutex_init(&binder_device->context.context_mgr_node_lock);
5490
5491         ret = misc_register(&binder_device->miscdev);
5492         if (ret < 0) {
5493                 kfree(binder_device);
5494                 return ret;
5495         }
5496
5497         hlist_add_head(&binder_device->hlist, &binder_devices);
5498
5499         return ret;
5500 }
5501
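/*
 * Module init: create the debugfs tree ("binder/" with state, stats,
 * transactions, transaction_log, failed_transaction_log and the
 * per-proc directory), then register one device per comma-separated
 * name in the binder_devices_param module parameter.  The log
 * cursors start at ~0U so the first increment lands on entry 0.
 */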
5502 static int __init binder_init(void)
5503 {
5504         int ret;
5505         char *device_name, *device_names, *device_tmp;
5506         struct binder_device *device;
5507         struct hlist_node *tmp;
5508
5509         binder_alloc_shrinker_init();
5510
5511         atomic_set(&binder_transaction_log.cur, ~0U);
5512         atomic_set(&binder_transaction_log_failed.cur, ~0U);
5513
5514         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5515         if (binder_debugfs_dir_entry_root) {
5516                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5517                                                  binder_debugfs_dir_entry_root);
5518
5520                 debugfs_create_file("state",
5521                                     S_IRUGO,
5522                                     binder_debugfs_dir_entry_root,
5523                                     NULL,
5524                                     &binder_state_fops);
5525                 debugfs_create_file("stats",
5526                                     S_IRUGO,
5527                                     binder_debugfs_dir_entry_root,
5528                                     NULL,
5529                                     &binder_stats_fops);
5530                 debugfs_create_file("transactions",
5531                                     S_IRUGO,
5532                                     binder_debugfs_dir_entry_root,
5533                                     NULL,
5534                                     &binder_transactions_fops);
5535                 debugfs_create_file("transaction_log",
5536                                     S_IRUGO,
5537                                     binder_debugfs_dir_entry_root,
5538                                     &binder_transaction_log,
5539                                     &binder_transaction_log_fops);
5540                 debugfs_create_file("failed_transaction_log",
5541                                     S_IRUGO,
5542                                     binder_debugfs_dir_entry_root,
5543                                     &binder_transaction_log_failed,
5544                                     &binder_transaction_log_fops);
5545         }
5546
5547         /*
5548          * Copy the module parameter string, because we don't want to
5549          * tokenize it in-place.
5550          */
5551         device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5552         if (!device_names) {
5553                 ret = -ENOMEM;
5554                 goto err_alloc_device_names_failed;
5555         }
5557
5558         device_tmp = device_names;
5559         while ((device_name = strsep(&device_tmp, ","))) {
5560                 ret = init_binder_device(device_name);
5561                 if (ret)
5562                         goto err_init_binder_device_failed;
5563         }
5564
5565         return ret;
5566
5567 err_init_binder_device_failed:
5568         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5569                 misc_deregister(&device->miscdev);
5570                 hlist_del(&device->hlist);
5571                 kfree(device);
5572         }
5573
5574         kfree(device_names);
5575
5576 err_alloc_device_names_failed:
5577         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5578
5579         return ret;
5580 }
5581
5582 device_initcall(binder_init);
5583
5584 #define CREATE_TRACE_POINTS
5585 #include "binder_trace.h"
5586
5587 MODULE_LICENSE("GPL v2");