1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack.
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock held on entry indicate the
42  * required lock in the suffix of the function name:
43  *
44  * foo_olocked() : requires proc->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
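/*
 * Illustrative sketch, not part of the driver: a path that needs all
 * three locks must take them in the order listed above and release
 * them in reverse, e.g. (assuming a proc and a node it owns):
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */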
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched/signal.h>
68 #include <linux/sched/mm.h>
69 #include <linux/seq_file.h>
70 #include <linux/uaccess.h>
71 #include <linux/pid_namespace.h>
72 #include <linux/security.h>
73 #include <linux/spinlock.h>
74
75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
76 #define BINDER_IPC_32BIT 1
77 #endif
78
79 #include <uapi/linux/android/binder.h>
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96
97 #define BINDER_DEBUG_ENTRY(name) \
98 static int binder_##name##_open(struct inode *inode, struct file *file) \
99 { \
100         return single_open(file, binder_##name##_show, inode->i_private); \
101 } \
102 \
103 static const struct file_operations binder_##name##_fops = { \
104         .owner = THIS_MODULE, \
105         .open = binder_##name##_open, \
106         .read = seq_read, \
107         .llseek = seq_lseek, \
108         .release = single_release, \
109 }
110
111 static int binder_proc_show(struct seq_file *m, void *unused);
112 BINDER_DEBUG_ENTRY(proc);
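/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands (sketch, with
 * the ## token pasting written out) to:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = binder_proc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 */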
113
114 /* This is only defined in include/linux/sizes.h */
115 #ifndef SZ_1K
116 #define SZ_1K                               0x400
117 #endif
118
119 #ifndef SZ_4M
120 #define SZ_4M                               0x400000
121 #endif
122
123 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
124
125 enum {
126         BINDER_DEBUG_USER_ERROR             = 1U << 0,
127         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
128         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
129         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
130         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
131         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
132         BINDER_DEBUG_READ_WRITE             = 1U << 6,
133         BINDER_DEBUG_USER_REFS              = 1U << 7,
134         BINDER_DEBUG_THREADS                = 1U << 8,
135         BINDER_DEBUG_TRANSACTION            = 1U << 9,
136         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
137         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
138         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
139         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
140         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
141 };
142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
143         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
144 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
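/*
 * Because the parameter is declared with S_IWUSR | S_IRUGO, the mask
 * can be changed at runtime through sysfs, e.g. from a root shell
 * (0x200, i.e. BINDER_DEBUG_TRANSACTION, is just an example value):
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 */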
145
146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
147 module_param_named(devices, binder_devices_param, charp, 0444);
148
149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
150 static int binder_stop_on_user_error;
151
152 static int binder_set_stop_on_user_error(const char *val,
153                                          const struct kernel_param *kp)
154 {
155         int ret;
156
157         ret = param_set_int(val, kp);
158         if (binder_stop_on_user_error < 2)
159                 wake_up(&binder_user_error_wait);
160         return ret;
161 }
162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
163         param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
164
165 #define binder_debug(mask, x...) \
166         do { \
167                 if (binder_debug_mask & mask) \
168                         pr_info(x); \
169         } while (0)
170
171 #define binder_user_error(x...) \
172         do { \
173                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
174                         pr_info(x); \
175                 if (binder_stop_on_user_error) \
176                         binder_stop_on_user_error = 2; \
177         } while (0)
178
179 #define to_flat_binder_object(hdr) \
180         container_of(hdr, struct flat_binder_object, hdr)
181
182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
183
184 #define to_binder_buffer_object(hdr) \
185         container_of(hdr, struct binder_buffer_object, hdr)
186
187 #define to_binder_fd_array_object(hdr) \
188         container_of(hdr, struct binder_fd_array_object, hdr)
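/*
 * These helpers recover a full object from its embedded header. A
 * typical use (sketch), given a struct binder_object_header *hdr whose
 * type has already been validated:
 *
 *	if (hdr->type == BINDER_TYPE_BINDER) {
 *		struct flat_binder_object *fp = to_flat_binder_object(hdr);
 *		...use fp->binder, fp->cookie...
 *	}
 */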
189
190 enum binder_stat_types {
191         BINDER_STAT_PROC,
192         BINDER_STAT_THREAD,
193         BINDER_STAT_NODE,
194         BINDER_STAT_REF,
195         BINDER_STAT_DEATH,
196         BINDER_STAT_TRANSACTION,
197         BINDER_STAT_TRANSACTION_COMPLETE,
198         BINDER_STAT_COUNT
199 };
200
201 struct binder_stats {
202         atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
203         atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
204         atomic_t obj_created[BINDER_STAT_COUNT];
205         atomic_t obj_deleted[BINDER_STAT_COUNT];
206 };
207
208 static struct binder_stats binder_stats;
209
210 static inline void binder_stats_deleted(enum binder_stat_types type)
211 {
212         atomic_inc(&binder_stats.obj_deleted[type]);
213 }
214
215 static inline void binder_stats_created(enum binder_stat_types type)
216 {
217         atomic_inc(&binder_stats.obj_created[type]);
218 }
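/*
 * The br[] and bc[] arrays in struct binder_stats are indexed by the
 * ioctl sequence number of the return/command code; e.g. recording
 * delivery of BR_TRANSACTION (the pattern used by the command loop
 * later in this file) looks like:
 *
 *	atomic_inc(&binder_stats.br[_IOC_NR(BR_TRANSACTION)]);
 */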
219
220 struct binder_transaction_log_entry {
221         int debug_id;
222         int debug_id_done;
223         int call_type;
224         int from_proc;
225         int from_thread;
226         int target_handle;
227         int to_proc;
228         int to_thread;
229         int to_node;
230         int data_size;
231         int offsets_size;
232         int return_error_line;
233         uint32_t return_error;
234         uint32_t return_error_param;
235         const char *context_name;
236 };
237 struct binder_transaction_log {
238         atomic_t cur;
239         bool full;
240         struct binder_transaction_log_entry entry[32];
241 };
242 static struct binder_transaction_log binder_transaction_log;
243 static struct binder_transaction_log binder_transaction_log_failed;
244
245 static struct binder_transaction_log_entry *binder_transaction_log_add(
246         struct binder_transaction_log *log)
247 {
248         struct binder_transaction_log_entry *e;
249         unsigned int cur = atomic_inc_return(&log->cur);
250
251         if (cur >= ARRAY_SIZE(log->entry))
252                 log->full = true;
253         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
254         WRITE_ONCE(e->debug_id_done, 0);
255         /*
256          * write-barrier to synchronize access to e->debug_id_done.
257          * We make sure the initialized 0 value is seen before
258          * the other fields are zeroed by memset().
259          */
260         smp_wmb();
261         memset(e, 0, sizeof(*e));
262         return e;
263 }
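/*
 * The read side (outside this section) pairs with the smp_wmb() above
 * using a seqlock-like check: sample debug_id_done, read the entry,
 * issue smp_rmb(), and treat the entry as incomplete if debug_id_done
 * was 0 or has changed. A sketch of the pairing:
 *
 *	int id = READ_ONCE(e->debug_id_done);
 *	...read the other fields of *e...
 *	smp_rmb();
 *	complete = id && id == READ_ONCE(e->debug_id_done);
 */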
264
265 struct binder_context {
266         struct binder_node *binder_context_mgr_node;
267         struct mutex context_mgr_node_lock;
268
269         kuid_t binder_context_mgr_uid;
270         const char *name;
271 };
272
273 struct binder_device {
274         struct hlist_node hlist;
275         struct miscdevice miscdev;
276         struct binder_context context;
277 };
278
279 /**
280  * struct binder_work - work enqueued on a worklist
281  * @entry:             node enqueued on list
282  * @type:              type of work to be performed
283  *
284  * There are separate work lists for proc, thread, and node (async).
285  */
286 struct binder_work {
287         struct list_head entry;
288
289         enum {
290                 BINDER_WORK_TRANSACTION = 1,
291                 BINDER_WORK_TRANSACTION_COMPLETE,
292                 BINDER_WORK_RETURN_ERROR,
293                 BINDER_WORK_NODE,
294                 BINDER_WORK_DEAD_BINDER,
295                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
296                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
297         } type;
298 };
299
300 struct binder_error {
301         struct binder_work work;
302         uint32_t cmd;
303 };
304
305 /**
306  * struct binder_node - binder node bookkeeping
307  * @debug_id:             unique ID for debugging
308  *                        (invariant after initialized)
309  * @lock:                 lock for node fields
310  * @work:                 worklist element for node work
311  *                        (protected by @proc->inner_lock)
312  * @rb_node:              element for proc->nodes tree
313  *                        (protected by @proc->inner_lock)
314  * @dead_node:            element for binder_dead_nodes list
315  *                        (protected by binder_dead_nodes_lock)
316  * @proc:                 binder_proc that owns this node
317  *                        (invariant after initialized)
318  * @refs:                 list of references on this node
319  *                        (protected by @lock)
320  * @internal_strong_refs: used to take strong references when
321  *                        initiating a transaction
322  *                        (protected by @proc->inner_lock if @proc
323  *                        and by @lock)
324  * @local_weak_refs:      weak user refs from local process
325  *                        (protected by @proc->inner_lock if @proc
326  *                        and by @lock)
327  * @local_strong_refs:    strong user refs from local process
328  *                        (protected by @proc->inner_lock if @proc
329  *                        and by @lock)
330  * @tmp_refs:             temporary kernel refs
331  *                        (protected by @proc->inner_lock while @proc
332  *                        is valid, and by binder_dead_nodes_lock
333  *                        if @proc is NULL. During inc/dec and node release
334  *                        it is also protected by @lock to provide safety
335  *                        as the node dies and @proc becomes NULL)
336  * @ptr:                  userspace pointer for node
337  *                        (invariant, no lock needed)
338  * @cookie:               userspace cookie for node
339  *                        (invariant, no lock needed)
340  * @has_strong_ref:       userspace notified of strong ref
341  *                        (protected by @proc->inner_lock if @proc
342  *                        and by @lock)
343  * @pending_strong_ref:   userspace has acked notification of strong ref
344  *                        (protected by @proc->inner_lock if @proc
345  *                        and by @lock)
346  * @has_weak_ref:         userspace notified of weak ref
347  *                        (protected by @proc->inner_lock if @proc
348  *                        and by @lock)
349  * @pending_weak_ref:     userspace has acked notification of weak ref
350  *                        (protected by @proc->inner_lock if @proc
351  *                        and by @lock)
352  * @has_async_transaction: async transaction to node in progress
353  *                        (protected by @lock)
354  * @accept_fds:           file descriptor operations supported for node
355  *                        (invariant after initialized)
356  * @min_priority:         minimum scheduling priority
357  *                        (invariant after initialized)
358  * @async_todo:           list of async work items
359  *                        (protected by @proc->inner_lock)
360  *
361  * Bookkeeping structure for binder nodes.
362  */
363 struct binder_node {
364         int debug_id;
365         spinlock_t lock;
366         struct binder_work work;
367         union {
368                 struct rb_node rb_node;
369                 struct hlist_node dead_node;
370         };
371         struct binder_proc *proc;
372         struct hlist_head refs;
373         int internal_strong_refs;
374         int local_weak_refs;
375         int local_strong_refs;
376         int tmp_refs;
377         binder_uintptr_t ptr;
378         binder_uintptr_t cookie;
379         struct {
380                 /*
381                  * bitfield elements protected by
382                  * proc inner_lock
383                  */
384                 u8 has_strong_ref:1;
385                 u8 pending_strong_ref:1;
386                 u8 has_weak_ref:1;
387                 u8 pending_weak_ref:1;
388         };
389         struct {
390                 /*
391                  * invariant after initialization
392                  */
393                 u8 accept_fds:1;
394                 u8 min_priority;
395         };
396         bool has_async_transaction;
397         struct list_head async_todo;
398 };
399
400 struct binder_ref_death {
401         /**
402          * @work: worklist element for death notifications
403          *        (protected by inner_lock of the proc that
404          *        this ref belongs to)
405          */
406         struct binder_work work;
407         binder_uintptr_t cookie;
408 };
409
410 /**
411  * struct binder_ref_data - binder_ref counts and id
412  * @debug_id:        unique ID for the ref
413  * @desc:            unique userspace handle for ref
414  * @strong:          strong ref count (debugging only if not locked)
415  * @weak:            weak ref count (debugging only if not locked)
416  *
417  * Structure to hold ref count and ref id information. Since
418  * the actual ref can only be accessed with a lock, this structure
419  * is used to return information about the ref to callers of
420  * ref inc/dec functions.
421  */
422 struct binder_ref_data {
423         int debug_id;
424         uint32_t desc;
425         int strong;
426         int weak;
427 };
428
429 /**
430  * struct binder_ref - struct to track references on nodes
431  * @data:        binder_ref_data containing id, handle, and current refcounts
432  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433  * @rb_node_node: node for lookup by @node in proc's rb_tree
434  * @node_entry:  list entry for node->refs list in target node
435  *               (protected by @node->lock)
436  * @proc:        binder_proc containing ref
437  * @node:        binder_node of target node. When cleaning up a
438  *               ref for deletion in binder_cleanup_ref, a non-NULL
439  *               @node indicates the node must be freed
440  * @death:       pointer to death notification (ref_death) if requested
441  *               (protected by @node->lock)
442  *
443  * Structure to track references from procA to target node (on procB). This
444  * structure is unsafe to access without holding @proc->outer_lock.
445  */
446 struct binder_ref {
447         /* Lookups needed: */
448         /*   node + proc => ref (transaction) */
449         /*   desc + proc => ref (transaction, inc/dec ref) */
450         /*   node => refs + procs (proc exit) */
451         struct binder_ref_data data;
452         struct rb_node rb_node_desc;
453         struct rb_node rb_node_node;
454         struct hlist_node node_entry;
455         struct binder_proc *proc;
456         struct binder_node *node;
457         struct binder_ref_death *death;
458 };
459
460 enum binder_deferred_state {
461         BINDER_DEFERRED_PUT_FILES    = 0x01,
462         BINDER_DEFERRED_FLUSH        = 0x02,
463         BINDER_DEFERRED_RELEASE      = 0x04,
464 };
465
466 /**
467  * struct binder_proc - binder process bookkeeping
468  * @proc_node:            element for binder_procs list
469  * @threads:              rbtree of binder_threads in this proc
470  *                        (protected by @inner_lock)
471  * @nodes:                rbtree of binder nodes associated with
472  *                        this proc ordered by node->ptr
473  *                        (protected by @inner_lock)
474  * @refs_by_desc:         rbtree of refs ordered by ref->desc
475  *                        (protected by @outer_lock)
476  * @refs_by_node:         rbtree of refs ordered by ref->node
477  *                        (protected by @outer_lock)
478  * @waiting_threads:      threads currently waiting for proc work
479  *                        (protected by @inner_lock)
480  * @pid:                  PID of group_leader of process
481  *                        (invariant after initialized)
482  * @tsk:                  task_struct for group_leader of process
483  *                        (invariant after initialized)
484  * @files:                files_struct for process
485  *                        (protected by @files_lock)
486  * @files_lock:           mutex to protect @files
487  * @deferred_work_node:   element for binder_deferred_list
488  *                        (protected by binder_deferred_lock)
489  * @deferred_work:        bitmap of deferred work to perform
490  *                        (protected by binder_deferred_lock)
491  * @is_dead:              process is dead and awaiting free
492  *                        when outstanding transactions are cleaned up
493  *                        (protected by @inner_lock)
494  * @todo:                 list of work for this process
495  *                        (protected by @inner_lock)
496  * @wait:                 wait queue head to wait for proc work
497  *                        (invariant after initialized)
498  * @stats:                per-process binder statistics
499  *                        (atomics, no lock needed)
500  * @delivered_death:      list of delivered death notifications
501  *                        (protected by @inner_lock)
502  * @max_threads:          cap on number of binder threads
503  *                        (protected by @inner_lock)
504  * @requested_threads:    number of binder threads requested but not
505  *                        yet started. In current implementation, can
506  *                        only be 0 or 1.
507  *                        (protected by @inner_lock)
508  * @requested_threads_started: number of binder threads started
509  *                        (protected by @inner_lock)
510  * @tmp_ref:              temporary reference to indicate proc is in use
511  *                        (protected by @inner_lock)
512  * @default_priority:     default scheduler priority
513  *                        (invariant after initialized)
514  * @debugfs_entry:        debugfs node
515  * @alloc:                binder allocator bookkeeping
516  * @context:              binder_context for this proc
517  *                        (invariant after initialized)
518  * @inner_lock:           can nest under outer_lock and/or node lock
519  * @outer_lock:           no nesting under inner or node lock
520  *                        Lock order: 1) outer, 2) node, 3) inner
521  *
522  * Bookkeeping structure for binder processes
523  */
524 struct binder_proc {
525         struct hlist_node proc_node;
526         struct rb_root threads;
527         struct rb_root nodes;
528         struct rb_root refs_by_desc;
529         struct rb_root refs_by_node;
530         struct list_head waiting_threads;
531         int pid;
532         struct task_struct *tsk;
533         struct files_struct *files;
534         struct mutex files_lock;
535         struct hlist_node deferred_work_node;
536         int deferred_work;
537         bool is_dead;
538
539         struct list_head todo;
540         wait_queue_head_t wait;
541         struct binder_stats stats;
542         struct list_head delivered_death;
543         int max_threads;
544         int requested_threads;
545         int requested_threads_started;
546         int tmp_ref;
547         long default_priority;
548         struct dentry *debugfs_entry;
549         struct binder_alloc alloc;
550         struct binder_context *context;
551         spinlock_t inner_lock;
552         spinlock_t outer_lock;
553 };
554
555 enum {
556         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
557         BINDER_LOOPER_STATE_ENTERED     = 0x02,
558         BINDER_LOOPER_STATE_EXITED      = 0x04,
559         BINDER_LOOPER_STATE_INVALID     = 0x08,
560         BINDER_LOOPER_STATE_WAITING     = 0x10,
561         BINDER_LOOPER_STATE_POLL        = 0x20,
562 };
563
564 /**
565  * struct binder_thread - binder thread bookkeeping
566  * @proc:                 binder process for this thread
567  *                        (invariant after initialization)
568  * @rb_node:              element for proc->threads rbtree
569  *                        (protected by @proc->inner_lock)
570  * @waiting_thread_node:  element for @proc->waiting_threads list
571  *                        (protected by @proc->inner_lock)
572  * @pid:                  PID for this thread
573  *                        (invariant after initialization)
574  * @looper:               bitmap of looping state
575  *                        (only accessed by this thread)
576  * @looper_need_return:   looping thread needs to exit driver
577  *                        (no lock needed)
578  * @transaction_stack:    stack of in-progress transactions for this thread
579  *                        (protected by @proc->inner_lock)
580  * @todo:                 list of work to do for this thread
581  *                        (protected by @proc->inner_lock)
582  * @return_error:         transaction errors reported by this thread
583  *                        (only accessed by this thread)
584  * @reply_error:          transaction errors reported by target thread
585  *                        (protected by @proc->inner_lock)
586  * @wait:                 wait queue for thread work
587  * @stats:                per-thread statistics
588  *                        (atomics, no lock needed)
589  * @tmp_ref:              temporary reference to indicate thread is in use
590  *                        (atomic since @proc->inner_lock cannot
591  *                        always be acquired)
592  * @is_dead:              thread is dead and awaiting free
593  *                        when outstanding transactions are cleaned up
594  *                        (protected by @proc->inner_lock)
595  *
596  * Bookkeeping structure for binder threads.
597  */
598 struct binder_thread {
599         struct binder_proc *proc;
600         struct rb_node rb_node;
601         struct list_head waiting_thread_node;
602         int pid;
603         int looper;              /* only modified by this thread */
604         bool looper_need_return; /* can be written by other thread */
605         struct binder_transaction *transaction_stack;
606         struct list_head todo;
607         struct binder_error return_error;
608         struct binder_error reply_error;
609         wait_queue_head_t wait;
610         struct binder_stats stats;
611         atomic_t tmp_ref;
612         bool is_dead;
613 };
614
615 struct binder_transaction {
616         int debug_id;
617         struct binder_work work;
618         struct binder_thread *from;
619         struct binder_transaction *from_parent;
620         struct binder_proc *to_proc;
621         struct binder_thread *to_thread;
622         struct binder_transaction *to_parent;
623         unsigned need_reply:1;
624         /* unsigned is_dead:1; */       /* not used at the moment */
625
626         struct binder_buffer *buffer;
627         unsigned int    code;
628         unsigned int    flags;
629         long    priority;
630         long    saved_priority;
631         kuid_t  sender_euid;
632         /**
633          * @lock:  protects @from, @to_proc, and @to_thread
634          *
635          * @from, @to_proc, and @to_thread can be set to NULL
636          * during thread teardown
637          */
638         spinlock_t lock;
639 };
640
641 /**
642  * binder_proc_lock() - Acquire outer lock for given binder_proc
643  * @proc:         struct binder_proc to acquire
644  *
645  * Acquires proc->outer_lock. Used to protect binder_ref
646  * structures associated with the given proc.
647  */
648 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
649 static void
650 _binder_proc_lock(struct binder_proc *proc, int line)
651 {
652         binder_debug(BINDER_DEBUG_SPINLOCKS,
653                      "%s: line=%d\n", __func__, line);
654         spin_lock(&proc->outer_lock);
655 }
656
657 /**
658  * binder_proc_unlock() - Release spinlock for given binder_proc
659  * @proc:         struct binder_proc to release
660  *
661  * Release lock acquired via binder_proc_lock()
662  */
663 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
664 static void
665 _binder_proc_unlock(struct binder_proc *proc, int line)
666 {
667         binder_debug(BINDER_DEBUG_SPINLOCKS,
668                      "%s: line=%d\n", __func__, line);
669         spin_unlock(&proc->outer_lock);
670 }
671
672 /**
673  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
674  * @proc:         struct binder_proc to acquire
675  *
676  * Acquires proc->inner_lock. Used to protect todo lists
677  */
678 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
679 static void
680 _binder_inner_proc_lock(struct binder_proc *proc, int line)
681 {
682         binder_debug(BINDER_DEBUG_SPINLOCKS,
683                      "%s: line=%d\n", __func__, line);
684         spin_lock(&proc->inner_lock);
685 }
686
687 /**
688  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
689  * @proc:         struct binder_proc to release
690  *
691  * Release lock acquired via binder_inner_proc_lock()
692  */
693 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
694 static void
695 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
696 {
697         binder_debug(BINDER_DEBUG_SPINLOCKS,
698                      "%s: line=%d\n", __func__, line);
699         spin_unlock(&proc->inner_lock);
700 }
701
702 /**
703  * binder_node_lock() - Acquire spinlock for given binder_node
704  * @node:         struct binder_node to acquire
705  *
706  * Acquires node->lock. Used to protect binder_node fields
707  */
708 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
709 static void
710 _binder_node_lock(struct binder_node *node, int line)
711 {
712         binder_debug(BINDER_DEBUG_SPINLOCKS,
713                      "%s: line=%d\n", __func__, line);
714         spin_lock(&node->lock);
715 }
716
717 /**
718  * binder_node_unlock() - Release spinlock for given binder_node
719  * @node:         struct binder_node to release
720  *
721  * Release lock acquired via binder_node_lock()
722  */
723 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
724 static void
725 _binder_node_unlock(struct binder_node *node, int line)
726 {
727         binder_debug(BINDER_DEBUG_SPINLOCKS,
728                      "%s: line=%d\n", __func__, line);
729         spin_unlock(&node->lock);
730 }
731
732 /**
733  * binder_node_inner_lock() - Acquire node and inner locks
734  * @node:         struct binder_node to acquire
735  *
736  * Acquires node->lock. If node->proc is set, also acquires
737  * proc->inner_lock. Used to protect binder_node fields.
738  */
739 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
740 static void
741 _binder_node_inner_lock(struct binder_node *node, int line)
742 {
743         binder_debug(BINDER_DEBUG_SPINLOCKS,
744                      "%s: line=%d\n", __func__, line);
745         spin_lock(&node->lock);
746         if (node->proc)
747                 binder_inner_proc_lock(node->proc);
748 }
749
750 /**
751  * binder_node_inner_unlock() - Release node and inner locks
752  * @node:         struct binder_node to release
753  *
754  * Release locks acquired via binder_node_inner_lock()
755  */
756 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
757 static void
758 _binder_node_inner_unlock(struct binder_node *node, int line)
759 {
760         struct binder_proc *proc = node->proc;
761
762         binder_debug(BINDER_DEBUG_SPINLOCKS,
763                      "%s: line=%d\n", __func__, line);
764         if (proc)
765                 binder_inner_proc_unlock(proc);
766         spin_unlock(&node->lock);
767 }
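/*
 * Typical usage of the helpers above (illustrative sketch): node
 * fields documented as "protected by @proc->inner_lock if @proc and
 * by @lock" are updated under binder_node_inner_lock(), which takes
 * whichever of the two locks apply:
 *
 *	binder_node_inner_lock(node);
 *	node->pending_strong_ref = 1;
 *	binder_node_inner_unlock(node);
 */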
768
769 static bool binder_worklist_empty_ilocked(struct list_head *list)
770 {
771         return list_empty(list);
772 }
773
774 /**
775  * binder_worklist_empty() - Check if no items on the work list
776  * @proc:       binder_proc associated with list
777  * @list:       list to check
778  *
779  * Return: true if there are no items on list, else false
780  */
781 static bool binder_worklist_empty(struct binder_proc *proc,
782                                   struct list_head *list)
783 {
784         bool ret;
785
786         binder_inner_proc_lock(proc);
787         ret = binder_worklist_empty_ilocked(list);
788         binder_inner_proc_unlock(proc);
789         return ret;
790 }
791
792 static void
793 binder_enqueue_work_ilocked(struct binder_work *work,
794                            struct list_head *target_list)
795 {
796         BUG_ON(target_list == NULL);
797         BUG_ON(work->entry.next && !list_empty(&work->entry));
798         list_add_tail(&work->entry, target_list);
799 }
800
801 /**
802  * binder_enqueue_work() - Add an item to the work list
803  * @proc:         binder_proc associated with list
804  * @work:         struct binder_work to add to list
805  * @target_list:  list to add work to
806  *
807  * Adds the work to the specified list. Asserts that work
808  * is not already on a list.
809  */
810 static void
811 binder_enqueue_work(struct binder_proc *proc,
812                     struct binder_work *work,
813                     struct list_head *target_list)
814 {
815         binder_inner_proc_lock(proc);
816         binder_enqueue_work_ilocked(work, target_list);
817         binder_inner_proc_unlock(proc);
818 }
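/*
 * For example, queueing a node's work item on its owning process
 * (sketch of the pattern used by the call sites later in this file):
 *
 *	binder_enqueue_work(node->proc, &node->work, &node->proc->todo);
 */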
819
820 static void
821 binder_dequeue_work_ilocked(struct binder_work *work)
822 {
823         list_del_init(&work->entry);
824 }
825
826 /**
827  * binder_dequeue_work() - Removes an item from the work list
828  * @proc:         binder_proc associated with list
829  * @work:         struct binder_work to remove from list
830  *
831  * Removes the specified work item from whatever list it is on.
832  * Can safely be called if work is not on any list.
833  */
834 static void
835 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
836 {
837         binder_inner_proc_lock(proc);
838         binder_dequeue_work_ilocked(work);
839         binder_inner_proc_unlock(proc);
840 }
841
842 static struct binder_work *binder_dequeue_work_head_ilocked(
843                                         struct list_head *list)
844 {
845         struct binder_work *w;
846
847         w = list_first_entry_or_null(list, struct binder_work, entry);
848         if (w)
849                 list_del_init(&w->entry);
850         return w;
851 }
852
853 /**
854  * binder_dequeue_work_head() - Dequeues the item at head of list
855  * @proc:         binder_proc associated with list
856  * @list:         list to dequeue head
857  *
858  * Removes the head of the list if there are items on the list
859  *
860  * Return: pointer to dequeued binder_work, or NULL if list was empty
861  */
862 static struct binder_work *binder_dequeue_work_head(
863                                         struct binder_proc *proc,
864                                         struct list_head *list)
865 {
866         struct binder_work *w;
867
868         binder_inner_proc_lock(proc);
869         w = binder_dequeue_work_head_ilocked(list);
870         binder_inner_proc_unlock(proc);
871         return w;
872 }
873
874 static void
875 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
876 static void binder_free_thread(struct binder_thread *thread);
877 static void binder_free_proc(struct binder_proc *proc);
878 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
879
880 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
881 {
882         unsigned long rlim_cur;
883         unsigned long irqs;
884         int ret;
885
886         mutex_lock(&proc->files_lock);
887         if (proc->files == NULL) {
888                 ret = -ESRCH;
889                 goto err;
890         }
891         if (!lock_task_sighand(proc->tsk, &irqs)) {
892                 ret = -EMFILE;
893                 goto err;
894         }
895         rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
896         unlock_task_sighand(proc->tsk, &irqs);
897
898         ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
899 err:
900         mutex_unlock(&proc->files_lock);
901         return ret;
902 }
903
904 /*
905  * copied from fd_install
906  */
907 static void task_fd_install(
908         struct binder_proc *proc, unsigned int fd, struct file *file)
909 {
910         mutex_lock(&proc->files_lock);
911         if (proc->files)
912                 __fd_install(proc->files, fd, file);
913         mutex_unlock(&proc->files_lock);
914 }
915
916 /*
917  * copied from sys_close
918  */
919 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
920 {
921         int retval;
922
923         mutex_lock(&proc->files_lock);
924         if (proc->files == NULL) {
925                 retval = -ESRCH;
926                 goto err;
927         }
928         retval = __close_fd(proc->files, fd);
929         /* can't restart close syscall because file table entry was cleared */
930         if (unlikely(retval == -ERESTARTSYS ||
931                      retval == -ERESTARTNOINTR ||
932                      retval == -ERESTARTNOHAND ||
933                      retval == -ERESTART_RESTARTBLOCK))
934                 retval = -EINTR;
935 err:
936         mutex_unlock(&proc->files_lock);
937         return retval;
938 }
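/*
 * Together, the three helpers above mirror the normal fd lifecycle but
 * operate on the target process's file table instead of current's.
 * Sketch of the pattern used when translating an fd in a transaction
 * (error handling abbreviated):
 *
 *	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *
 *	if (fd >= 0)
 *		task_fd_install(target_proc, fd, file);
 *	...
 *	task_close_fd(target_proc, fd);	// undo on transaction failure
 */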
939
940 static bool binder_has_work_ilocked(struct binder_thread *thread,
941                                     bool do_proc_work)
942 {
943         return !binder_worklist_empty_ilocked(&thread->todo) ||
944                 thread->looper_need_return ||
945                 (do_proc_work &&
946                  !binder_worklist_empty_ilocked(&thread->proc->todo));
947 }
948
949 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
950 {
951         bool has_work;
952
953         binder_inner_proc_lock(thread->proc);
954         has_work = binder_has_work_ilocked(thread, do_proc_work);
955         binder_inner_proc_unlock(thread->proc);
956
957         return has_work;
958 }
959
960 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
961 {
962         return !thread->transaction_stack &&
963                 binder_worklist_empty_ilocked(&thread->todo) &&
964                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
965                                    BINDER_LOOPER_STATE_REGISTERED));
966 }
967
968 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
969                                                bool sync)
970 {
971         struct rb_node *n;
972         struct binder_thread *thread;
973
974         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
975                 thread = rb_entry(n, struct binder_thread, rb_node);
976                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
977                     binder_available_for_proc_work_ilocked(thread)) {
978                         if (sync)
979                                 wake_up_interruptible_sync(&thread->wait);
980                         else
981                                 wake_up_interruptible(&thread->wait);
982                 }
983         }
984 }
985
986 /**
987  * binder_select_thread_ilocked() - selects a thread for doing proc work.
988  * @proc:       process to select a thread from
989  *
990  * Note that calling this function moves the thread off the waiting_threads
991  * list, so it can only be woken up by the caller of this function, or a
992  * signal. Therefore, callers *should* always wake up the thread this function
993  * returns.
994  *
995  * Return:      If there's a thread currently waiting for process work,
996  *              returns that thread. Otherwise returns NULL.
997  */
998 static struct binder_thread *
999 binder_select_thread_ilocked(struct binder_proc *proc)
1000 {
1001         struct binder_thread *thread;
1002
1003         assert_spin_locked(&proc->inner_lock);
1004         thread = list_first_entry_or_null(&proc->waiting_threads,
1005                                           struct binder_thread,
1006                                           waiting_thread_node);
1007
1008         if (thread)
1009                 list_del_init(&thread->waiting_thread_node);
1010
1011         return thread;
1012 }
1013
1014 /**
1015  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1016  * @proc:       process to wake up a thread in
1017  * @thread:     specific thread to wake-up (may be NULL)
1018  * @sync:       whether to do a synchronous wake-up
1019  *
1020  * This function wakes up a thread in the @proc process.
1021  * The caller may provide a specific thread to wake-up in
1022  * the @thread parameter. If @thread is NULL, this function
1023  * will wake up threads that have called poll().
1024  *
1025  * Note that for this function to work as expected, callers
1026  * should first call binder_select_thread_ilocked() to find a thread
1027  * to handle the work (if they don't have a thread already),
1028  * and pass the result into the @thread parameter.
1029  */
1030 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1031                                          struct binder_thread *thread,
1032                                          bool sync)
1033 {
1034         assert_spin_locked(&proc->inner_lock);
1035
1036         if (thread) {
1037                 if (sync)
1038                         wake_up_interruptible_sync(&thread->wait);
1039                 else
1040                         wake_up_interruptible(&thread->wait);
1041                 return;
1042         }
1043
1044         /* Didn't find a thread waiting for proc work; this can happen
1045          * in two scenarios:
1046          * 1. All threads are busy handling transactions
1047          *    In that case, one of those threads should call back into
1048          *    the kernel driver soon and pick up this work.
1049          * 2. Threads are using the (e)poll interface, in which case
1050          *    they may be blocked on the waitqueue without having been
1051          *    added to waiting_threads. For this case, we just iterate
1052          *    over all threads not handling transaction work, and
1053          *    wake them all up. We wake all because we don't know whether
1054          *    a thread that called into (e)poll is handling non-binder
1055          *    work currently.
1056          */
1057         binder_wakeup_poll_threads_ilocked(proc, sync);
1058 }
1059
1060 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1061 {
1062         struct binder_thread *thread = binder_select_thread_ilocked(proc);
1063
1064         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1065 }
1066
1067 static void binder_set_nice(long nice)
1068 {
1069         long min_nice;
1070
1071         if (can_nice(current, nice)) {
1072                 set_user_nice(current, nice);
1073                 return;
1074         }
1075         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1076         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1077                      "%d: nice value %ld not allowed use %ld instead\n",
1078                       current->pid, nice, min_nice);
1079         set_user_nice(current, min_nice);
1080         if (min_nice <= MAX_NICE)
1081                 return;
1082         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1083 }
1084
1085 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1086                                                    binder_uintptr_t ptr)
1087 {
1088         struct rb_node *n = proc->nodes.rb_node;
1089         struct binder_node *node;
1090
1091         assert_spin_locked(&proc->inner_lock);
1092
1093         while (n) {
1094                 node = rb_entry(n, struct binder_node, rb_node);
1095
1096                 if (ptr < node->ptr)
1097                         n = n->rb_left;
1098                 else if (ptr > node->ptr)
1099                         n = n->rb_right;
1100                 else {
1101                         /*
1102                          * take an implicit weak reference
1103                          * to ensure node stays alive until
1104                          * call to binder_put_node()
1105                          */
1106                         binder_inc_node_tmpref_ilocked(node);
1107                         return node;
1108                 }
1109         }
1110         return NULL;
1111 }
1112
1113 static struct binder_node *binder_get_node(struct binder_proc *proc,
1114                                            binder_uintptr_t ptr)
1115 {
1116         struct binder_node *node;
1117
1118         binder_inner_proc_lock(proc);
1119         node = binder_get_node_ilocked(proc, ptr);
1120         binder_inner_proc_unlock(proc);
1121         return node;
1122 }
1123
1124 static struct binder_node *binder_init_node_ilocked(
1125                                                 struct binder_proc *proc,
1126                                                 struct binder_node *new_node,
1127                                                 struct flat_binder_object *fp)
1128 {
1129         struct rb_node **p = &proc->nodes.rb_node;
1130         struct rb_node *parent = NULL;
1131         struct binder_node *node;
1132         binder_uintptr_t ptr = fp ? fp->binder : 0;
1133         binder_uintptr_t cookie = fp ? fp->cookie : 0;
1134         __u32 flags = fp ? fp->flags : 0;
1135
1136         assert_spin_locked(&proc->inner_lock);
1137
1138         while (*p) {
1139
1140                 parent = *p;
1141                 node = rb_entry(parent, struct binder_node, rb_node);
1142
1143                 if (ptr < node->ptr)
1144                         p = &(*p)->rb_left;
1145                 else if (ptr > node->ptr)
1146                         p = &(*p)->rb_right;
1147                 else {
1148                         /*
1149                          * A matching node is already in
1150                          * the rb tree. Abandon the init
1151                          * and return it.
1152                          */
1153                         binder_inc_node_tmpref_ilocked(node);
1154                         return node;
1155                 }
1156         }
1157         node = new_node;
1158         binder_stats_created(BINDER_STAT_NODE);
1159         node->tmp_refs++;
1160         rb_link_node(&node->rb_node, parent, p);
1161         rb_insert_color(&node->rb_node, &proc->nodes);
1162         node->debug_id = atomic_inc_return(&binder_last_id);
1163         node->proc = proc;
1164         node->ptr = ptr;
1165         node->cookie = cookie;
1166         node->work.type = BINDER_WORK_NODE;
1167         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1168         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1169         spin_lock_init(&node->lock);
1170         INIT_LIST_HEAD(&node->work.entry);
1171         INIT_LIST_HEAD(&node->async_todo);
1172         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1173                      "%d:%d node %d u%016llx c%016llx created\n",
1174                      proc->pid, current->pid, node->debug_id,
1175                      (u64)node->ptr, (u64)node->cookie);
1176
1177         return node;
1178 }
1179
1180 static struct binder_node *binder_new_node(struct binder_proc *proc,
1181                                            struct flat_binder_object *fp)
1182 {
1183         struct binder_node *node;
1184         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1185
1186         if (!new_node)
1187                 return NULL;
1188         binder_inner_proc_lock(proc);
1189         node = binder_init_node_ilocked(proc, new_node, fp);
1190         binder_inner_proc_unlock(proc);
1191         if (node != new_node)
1192                 /*
1193                  * The node was already added by another thread
1194                  */
1195                 kfree(new_node);
1196
1197         return node;
1198 }
1199
1200 static void binder_free_node(struct binder_node *node)
1201 {
1202         kfree(node);
1203         binder_stats_deleted(BINDER_STAT_NODE);
1204 }
1205
1206 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1207                                     int internal,
1208                                     struct list_head *target_list)
1209 {
1210         struct binder_proc *proc = node->proc;
1211
1212         assert_spin_locked(&node->lock);
1213         if (proc)
1214                 assert_spin_locked(&proc->inner_lock);
1215         if (strong) {
1216                 if (internal) {
1217                         if (target_list == NULL &&
1218                             node->internal_strong_refs == 0 &&
1219                             !(node->proc &&
1220                               node == node->proc->context->binder_context_mgr_node &&
1221                               node->has_strong_ref)) {
1222                                 pr_err("invalid inc strong node for %d\n",
1223                                         node->debug_id);
1224                                 return -EINVAL;
1225                         }
1226                         node->internal_strong_refs++;
1227                 } else
1228                         node->local_strong_refs++;
1229                 if (!node->has_strong_ref && target_list) {
1230                         binder_dequeue_work_ilocked(&node->work);
1231                         binder_enqueue_work_ilocked(&node->work, target_list);
1232                 }
1233         } else {
1234                 if (!internal)
1235                         node->local_weak_refs++;
1236                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1237                         if (target_list == NULL) {
1238                                 pr_err("invalid inc weak node for %d\n",
1239                                         node->debug_id);
1240                                 return -EINVAL;
1241                         }
1242                         binder_enqueue_work_ilocked(&node->work, target_list);
1243                 }
1244         }
1245         return 0;
1246 }
1247
1248 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1249                            struct list_head *target_list)
1250 {
1251         int ret;
1252
1253         binder_node_inner_lock(node);
1254         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1255         binder_node_inner_unlock(node);
1256
1257         return ret;
1258 }
1259
1260 static bool binder_dec_node_nilocked(struct binder_node *node,
1261                                      int strong, int internal)
1262 {
1263         struct binder_proc *proc = node->proc;
1264
1265         assert_spin_locked(&node->lock);
1266         if (proc)
1267                 assert_spin_locked(&proc->inner_lock);
1268         if (strong) {
1269                 if (internal)
1270                         node->internal_strong_refs--;
1271                 else
1272                         node->local_strong_refs--;
1273                 if (node->local_strong_refs || node->internal_strong_refs)
1274                         return false;
1275         } else {
1276                 if (!internal)
1277                         node->local_weak_refs--;
1278                 if (node->local_weak_refs || node->tmp_refs ||
1279                                 !hlist_empty(&node->refs))
1280                         return false;
1281         }
1282
1283         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1284                 if (list_empty(&node->work.entry)) {
1285                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
1286                         binder_wakeup_proc_ilocked(proc);
1287                 }
1288         } else {
1289                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1290                     !node->local_weak_refs && !node->tmp_refs) {
1291                         if (proc) {
1292                                 binder_dequeue_work_ilocked(&node->work);
1293                                 rb_erase(&node->rb_node, &proc->nodes);
1294                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1295                                              "refless node %d deleted\n",
1296                                              node->debug_id);
1297                         } else {
1298                                 BUG_ON(!list_empty(&node->work.entry));
1299                                 spin_lock(&binder_dead_nodes_lock);
1300                                 /*
1301                                  * tmp_refs could have changed so
1302                                  * check it again
1303                                  */
1304                                 if (node->tmp_refs) {
1305                                         spin_unlock(&binder_dead_nodes_lock);
1306                                         return false;
1307                                 }
1308                                 hlist_del(&node->dead_node);
1309                                 spin_unlock(&binder_dead_nodes_lock);
1310                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1311                                              "dead node %d deleted\n",
1312                                              node->debug_id);
1313                         }
1314                         return true;
1315                 }
1316         }
1317         return false;
1318 }
1319
1320 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1321 {
1322         bool free_node;
1323
1324         binder_node_inner_lock(node);
1325         free_node = binder_dec_node_nilocked(node, strong, internal);
1326         binder_node_inner_unlock(node);
1327         if (free_node)
1328                 binder_free_node(node);
1329 }
1330
1331 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1332 {
1333         /*
1334          * No call to binder_inc_node() is needed since we
1335          * don't need to inform userspace of any changes to
1336          * tmp_refs
1337          */
1338         node->tmp_refs++;
1339 }
1340
1341 /**
1342  * binder_inc_node_tmpref() - take a temporary reference on node
1343  * @node:       node to reference
1344  *
1345  * Take reference on node to prevent the node from being freed
1346  * while referenced only by a local variable. The inner lock is
1347  * needed to serialize with the node work on the queue (which
1348  * isn't needed after the node is dead). If the node is dead
1349  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1350  * node->tmp_refs against dead-node-only cases where the node
1351  * lock cannot be acquired (e.g. traversing the dead node list to
1352  * print nodes)
1353  */
1354 static void binder_inc_node_tmpref(struct binder_node *node)
1355 {
1356         binder_node_lock(node);
1357         if (node->proc)
1358                 binder_inner_proc_lock(node->proc);
1359         else
1360                 spin_lock(&binder_dead_nodes_lock);
1361         binder_inc_node_tmpref_ilocked(node);
1362         if (node->proc)
1363                 binder_inner_proc_unlock(node->proc);
1364         else
1365                 spin_unlock(&binder_dead_nodes_lock);
1366         binder_node_unlock(node);
1367 }
1368
1369 /**
1370  * binder_dec_node_tmpref() - remove a temporary reference on node
1371  * @node:       node to reference
1372  *
1373  * Release temporary reference on node taken via binder_inc_node_tmpref()
1374  */
1375 static void binder_dec_node_tmpref(struct binder_node *node)
1376 {
1377         bool free_node;
1378
1379         binder_node_inner_lock(node);
1380         if (!node->proc)
1381                 spin_lock(&binder_dead_nodes_lock);
1382         node->tmp_refs--;
1383         BUG_ON(node->tmp_refs < 0);
1384         if (!node->proc)
1385                 spin_unlock(&binder_dead_nodes_lock);
1386         /*
1387          * Call binder_dec_node() to check if all refcounts are 0
1388          * and cleanup is needed. Calling with strong=0 and internal=1
1389          * causes no actual reference to be released in binder_dec_node().
1390          * If that changes, a change is needed here too.
1391          */
1392         free_node = binder_dec_node_nilocked(node, 0, 1);
1393         binder_node_inner_unlock(node);
1394         if (free_node)
1395                 binder_free_node(node);
1396 }
1397
1398 static void binder_put_node(struct binder_node *node)
1399 {
1400         binder_dec_node_tmpref(node);
1401 }
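/*
 * The temporary reference helpers are always used in matched pairs
 * around a window where only a local pointer keeps the node alive
 * (sketch; binder_get_node() takes the tmp ref internally):
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */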
1402
1403 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1404                                                  u32 desc, bool need_strong_ref)
1405 {
1406         struct rb_node *n = proc->refs_by_desc.rb_node;
1407         struct binder_ref *ref;
1408
1409         while (n) {
1410                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1411
1412                 if (desc < ref->data.desc) {
1413                         n = n->rb_left;
1414                 } else if (desc > ref->data.desc) {
1415                         n = n->rb_right;
1416                 } else if (need_strong_ref && !ref->data.strong) {
1417                         binder_user_error("tried to use weak ref as strong ref\n");
1418                         return NULL;
1419                 } else {
1420                         return ref;
1421                 }
1422         }
1423         return NULL;
1424 }
1425
1426 /**
1427  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1428  * @proc:       binder_proc that owns the ref
1429  * @node:       binder_node of target
1430  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1431  *
1432  * Look up the ref for the given node and return it if it exists
1433  *
1434  * If it doesn't exist and the caller provides a newly allocated
1435  * ref, initialize the fields of the newly allocated ref and insert
1436  * into the given proc rb_trees and node refs list.
1437  *
1438  * Return:      the ref for node. It is possible that another thread
1439  *              allocated/initialized the ref first in which case the
1440  *              returned ref would be different than the passed-in
1441  *              new_ref. new_ref must be kfree'd by the caller in
1442  *              this case.
1443  */
1444 static struct binder_ref *binder_get_ref_for_node_olocked(
1445                                         struct binder_proc *proc,
1446                                         struct binder_node *node,
1447                                         struct binder_ref *new_ref)
1448 {
1449         struct binder_context *context = proc->context;
1450         struct rb_node **p = &proc->refs_by_node.rb_node;
1451         struct rb_node *parent = NULL;
1452         struct binder_ref *ref;
1453         struct rb_node *n;
1454
1455         while (*p) {
1456                 parent = *p;
1457                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1458
1459                 if (node < ref->node)
1460                         p = &(*p)->rb_left;
1461                 else if (node > ref->node)
1462                         p = &(*p)->rb_right;
1463                 else
1464                         return ref;
1465         }
1466         if (!new_ref)
1467                 return NULL;
1468
1469         binder_stats_created(BINDER_STAT_REF);
1470         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1471         new_ref->proc = proc;
1472         new_ref->node = node;
1473         rb_link_node(&new_ref->rb_node_node, parent, p);
1474         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1475
1476         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1477         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1478                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1479                 if (ref->data.desc > new_ref->data.desc)
1480                         break;
1481                 new_ref->data.desc = ref->data.desc + 1;
1482         }
1483
1484         p = &proc->refs_by_desc.rb_node;
1485         while (*p) {
1486                 parent = *p;
1487                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1488
1489                 if (new_ref->data.desc < ref->data.desc)
1490                         p = &(*p)->rb_left;
1491                 else if (new_ref->data.desc > ref->data.desc)
1492                         p = &(*p)->rb_right;
1493                 else
1494                         BUG();
1495         }
1496         rb_link_node(&new_ref->rb_node_desc, parent, p);
1497         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1498
1499         binder_node_lock(node);
1500         hlist_add_head(&new_ref->node_entry, &node->refs);
1501
1502         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1503                      "%d new ref %d desc %d for node %d\n",
1504                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1505                       node->debug_id);
1506         binder_node_unlock(node);
1507         return new_ref;
1508 }
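
/*
 * Allocation sketch for the race described above (see
 * binder_inc_ref_for_node() below for the real caller): the lookup is
 * first tried with %NULL, the outer lock is dropped for kzalloc(), and
 * the new_ref is freed if another thread initialized a ref first:
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}
 *	...
 *	if (new_ref && ref != new_ref)
 *		kfree(new_ref);
 */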
1509
1510 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1511 {
1512         bool delete_node = false;
1513
1514         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1515                      "%d delete ref %d desc %d for node %d\n",
1516                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1517                       ref->node->debug_id);
1518
1519         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1520         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1521
1522         binder_node_inner_lock(ref->node);
1523         if (ref->data.strong)
1524                 binder_dec_node_nilocked(ref->node, 1, 1);
1525
1526         hlist_del(&ref->node_entry);
1527         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1528         binder_node_inner_unlock(ref->node);
1529         /*
1530          * Clear ref->node unless we want the caller to free the node
1531          */
1532         if (!delete_node) {
1533                 /*
1534                  * The caller uses ref->node to determine
1535                  * whether the node needs to be freed. Clear
1536                  * it since the node is still alive.
1537                  */
1538                 ref->node = NULL;
1539         }
1540
1541         if (ref->death) {
1542                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1543                              "%d delete ref %d desc %d has death notification\n",
1544                               ref->proc->pid, ref->data.debug_id,
1545                               ref->data.desc);
1546                 binder_dequeue_work(ref->proc, &ref->death->work);
1547                 binder_stats_deleted(BINDER_STAT_DEATH);
1548         }
1549         binder_stats_deleted(BINDER_STAT_REF);
1550 }
1551
1552 /**
1553  * binder_inc_ref_olocked() - increment the ref for given handle
1554  * @ref:         ref to be incremented
1555  * @strong:      if true, strong increment, else weak
1556  * @target_list: list to queue node work on
1557  *
1558  * Increment the ref. @ref->proc->outer_lock must be held on entry
1559  *
1560  * Return: 0, if successful, else errno
1561  */
1562 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1563                                   struct list_head *target_list)
1564 {
1565         int ret;
1566
1567         if (strong) {
1568                 if (ref->data.strong == 0) {
1569                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1570                         if (ret)
1571                                 return ret;
1572                 }
1573                 ref->data.strong++;
1574         } else {
1575                 if (ref->data.weak == 0) {
1576                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1577                         if (ret)
1578                                 return ret;
1579                 }
1580                 ref->data.weak++;
1581         }
1582         return 0;
1583 }
1584
1585 /**
1586  * binder_dec_ref_olocked() - dec the ref for given handle
1587  * @ref:        ref to be decremented
1588  * @strong:     if true, strong decrement, else weak
1589  *
1590  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1591  *
1592  * Return: true if ref is cleaned up and ready to be freed
1593  */
1594 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1595 {
1596         if (strong) {
1597                 if (ref->data.strong == 0) {
1598                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1599                                           ref->proc->pid, ref->data.debug_id,
1600                                           ref->data.desc, ref->data.strong,
1601                                           ref->data.weak);
1602                         return false;
1603                 }
1604                 ref->data.strong--;
1605                 if (ref->data.strong == 0)
1606                         binder_dec_node(ref->node, strong, 1);
1607         } else {
1608                 if (ref->data.weak == 0) {
1609                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1610                                           ref->proc->pid, ref->data.debug_id,
1611                                           ref->data.desc, ref->data.strong,
1612                                           ref->data.weak);
1613                         return false;
1614                 }
1615                 ref->data.weak--;
1616         }
1617         if (ref->data.strong == 0 && ref->data.weak == 0) {
1618                 binder_cleanup_ref_olocked(ref);
1619                 return true;
1620         }
1621         return false;
1622 }
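
/*
 * Pairing sketch (illustrative): a true return from
 * binder_dec_ref_olocked() means the ref was unhooked from the proc
 * rb-trees and must be freed by the caller once the outer lock is
 * dropped, as binder_update_ref_for_handle() below does:
 *
 *	binder_proc_lock(proc);
 *	delete_ref = binder_dec_ref_olocked(ref, strong);
 *	binder_proc_unlock(proc);
 *	if (delete_ref)
 *		binder_free_ref(ref);
 */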
1623
1624 /**
1625  * binder_get_node_from_ref() - get the node from the given proc/desc
1626  * @proc:       proc containing the ref
1627  * @desc:       the handle associated with the ref
1628  * @need_strong_ref: if true, only return node if ref is strong
1629  * @rdata:      the id/refcount data for the ref
1630  *
1631  * Given a proc and ref handle, return the associated binder_node
1632  *
1633  * Return: a binder_node, or NULL if not found or if the ref is
1634  * only weak when a strong reference is required
1634  */
1635 static struct binder_node *binder_get_node_from_ref(
1636                 struct binder_proc *proc,
1637                 u32 desc, bool need_strong_ref,
1638                 struct binder_ref_data *rdata)
1639 {
1640         struct binder_node *node;
1641         struct binder_ref *ref;
1642
1643         binder_proc_lock(proc);
1644         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1645         if (!ref)
1646                 goto err_no_ref;
1647         node = ref->node;
1648         /*
1649          * Take an implicit reference on the node to ensure
1650          * it stays alive until the call to binder_put_node()
1651          */
1652         binder_inc_node_tmpref(node);
1653         if (rdata)
1654                 *rdata = ref->data;
1655         binder_proc_unlock(proc);
1656
1657         return node;
1658
1659 err_no_ref:
1660         binder_proc_unlock(proc);
1661         return NULL;
1662 }
1663
1664 /**
1665  * binder_free_ref() - free the binder_ref
1666  * @ref:        ref to free
1667  *
1668  * Free the binder_ref. Free the binder_node indicated by ref->node
1669  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1670  */
1671 static void binder_free_ref(struct binder_ref *ref)
1672 {
1673         if (ref->node)
1674                 binder_free_node(ref->node);
1675         kfree(ref->death);
1676         kfree(ref);
1677 }
1678
1679 /**
1680  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1681  * @proc:       proc containing the ref
1682  * @desc:       the handle associated with the ref
1683  * @increment:  true=inc reference, false=dec reference
1684  * @strong:     true=strong reference, false=weak reference
1685  * @rdata:      the id/refcount data for the ref
1686  *
1687  * Given a proc and ref handle, increment or decrement the ref
1688  * according to "increment" arg.
1689  *
1690  * Return: 0 if successful, else errno
1691  */
1692 static int binder_update_ref_for_handle(struct binder_proc *proc,
1693                 uint32_t desc, bool increment, bool strong,
1694                 struct binder_ref_data *rdata)
1695 {
1696         int ret = 0;
1697         struct binder_ref *ref;
1698         bool delete_ref = false;
1699
1700         binder_proc_lock(proc);
1701         ref = binder_get_ref_olocked(proc, desc, strong);
1702         if (!ref) {
1703                 ret = -EINVAL;
1704                 goto err_no_ref;
1705         }
1706         if (increment)
1707                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1708         else
1709                 delete_ref = binder_dec_ref_olocked(ref, strong);
1710
1711         if (rdata)
1712                 *rdata = ref->data;
1713         binder_proc_unlock(proc);
1714
1715         if (delete_ref)
1716                 binder_free_ref(ref);
1717         return ret;
1718
1719 err_no_ref:
1720         binder_proc_unlock(proc);
1721         return ret;
1722 }
1723
1724 /**
1725  * binder_dec_ref_for_handle() - dec the ref for given handle
1726  * @proc:       proc containing the ref
1727  * @desc:       the handle associated with the ref
1728  * @strong:     true=strong reference, false=weak reference
1729  * @rdata:      the id/refcount data for the ref
1730  *
1731  * Just calls binder_update_ref_for_handle() to decrement the ref.
1732  *
1733  * Return: 0 if successful, else errno
1734  */
1735 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1736                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1737 {
1738         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1739 }
1740
1741
1742 /**
1743  * binder_inc_ref_for_node() - increment the ref for given proc/node
1744  * @proc:        proc containing the ref
1745  * @node:        target node
1746  * @strong:      true=strong reference, false=weak reference
1747  * @target_list: worklist to use if node is incremented
1748  * @rdata:       the id/refcount data for the ref
1749  *
1750  * Given a proc and node, increment the ref. Create the ref if it
1751  * doesn't already exist
1752  *
1753  * Return: 0 if successful, else errno
1754  */
1755 static int binder_inc_ref_for_node(struct binder_proc *proc,
1756                         struct binder_node *node,
1757                         bool strong,
1758                         struct list_head *target_list,
1759                         struct binder_ref_data *rdata)
1760 {
1761         struct binder_ref *ref;
1762         struct binder_ref *new_ref = NULL;
1763         int ret = 0;
1764
1765         binder_proc_lock(proc);
1766         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1767         if (!ref) {
1768                 binder_proc_unlock(proc);
1769                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1770                 if (!new_ref)
1771                         return -ENOMEM;
1772                 binder_proc_lock(proc);
1773                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1774         }
1775         ret = binder_inc_ref_olocked(ref, strong, target_list);
1776         *rdata = ref->data;
1777         binder_proc_unlock(proc);
1778         if (new_ref && ref != new_ref)
1779                 /*
1780                  * Another thread created the ref first so
1781                  * free the one we allocated
1782                  */
1783                 kfree(new_ref);
1784         return ret;
1785 }
1786
1787 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1788                                            struct binder_transaction *t)
1789 {
1790         BUG_ON(!target_thread);
1791         assert_spin_locked(&target_thread->proc->inner_lock);
1792         BUG_ON(target_thread->transaction_stack != t);
1793         BUG_ON(target_thread->transaction_stack->from != target_thread);
1794         target_thread->transaction_stack =
1795                 target_thread->transaction_stack->from_parent;
1796         t->from = NULL;
1797 }
1798
1799 /**
1800  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1801  * @thread:     thread to decrement
1802  *
1803  * A thread needs to be kept alive while being used to create or
1804  * handle a transaction. binder_get_txn_from() is used to safely
1805  * extract t->from from a binder_transaction and keep the thread
1806  * indicated by t->from from being freed. When done with that
1807  * binder_thread, this function is called to decrement the
1808  * tmp_ref and free if appropriate (thread has been released
1809  * and no transaction being processed by the driver)
1810  */
1811 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1812 {
1813         /*
1814          * The atomic lets the counter change without the inner lock
1815          * (see binder_get_txn_from()); the free check below is done
1816          * under the inner lock so that is_dead and tmp_ref are
1817          * evaluated consistently
1818          */
1817         binder_inner_proc_lock(thread->proc);
1818         atomic_dec(&thread->tmp_ref);
1819         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1820                 binder_inner_proc_unlock(thread->proc);
1821                 binder_free_thread(thread);
1822                 return;
1823         }
1824         binder_inner_proc_unlock(thread->proc);
1825 }
1826
1827 /**
1828  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1829  * @proc:       proc to decrement
1830  *
1831  * A binder_proc needs to be kept alive while being used to create or
1832  * handle a transaction. proc->tmp_ref is incremented when
1833  * creating a new transaction or the binder_proc is currently in-use
1834  * by threads that are being released. When done with the binder_proc,
1835  * this function is called to decrement the counter and free the
1836  * proc if appropriate (proc has been released, all threads have
1837  * been released and not currently in-use to process a transaction).
1838  */
1839 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1840 {
1841         binder_inner_proc_lock(proc);
1842         proc->tmp_ref--;
1843         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1844                         !proc->tmp_ref) {
1845                 binder_inner_proc_unlock(proc);
1846                 binder_free_proc(proc);
1847                 return;
1848         }
1849         binder_inner_proc_unlock(proc);
1850 }
1851
1852 /**
1853  * binder_get_txn_from() - safely extract the "from" thread in transaction
1854  * @t:  binder transaction for t->from
1855  *
1856  * Atomically return the "from" thread and increment the tmp_ref
1857  * count for the thread to ensure it stays alive until
1858  * binder_thread_dec_tmpref() is called.
1859  *
1860  * Return: the value of t->from
1861  */
1862 static struct binder_thread *binder_get_txn_from(
1863                 struct binder_transaction *t)
1864 {
1865         struct binder_thread *from;
1866
1867         spin_lock(&t->lock);
1868         from = t->from;
1869         if (from)
1870                 atomic_inc(&from->tmp_ref);
1871         spin_unlock(&t->lock);
1872         return from;
1873 }
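
/*
 * Pairing sketch (illustrative): every successful binder_get_txn_from()
 * must be balanced by binder_thread_dec_tmpref() once the thread is no
 * longer needed; binder_get_txn_from_and_acq_inner() below shows the
 * full pattern:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */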
1874
1875 /**
1876  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1877  * @t:  binder transaction for t->from
1878  *
1879  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1880  * to guarantee that the thread cannot be released while operating on it.
1881  * The caller must call binder_inner_proc_unlock() to release the inner lock
1882  * as well as call binder_thread_dec_tmpref() to release the reference.
1883  *
1884  * Return: the value of t->from
1885  */
1886 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1887                 struct binder_transaction *t)
1888 {
1889         struct binder_thread *from;
1890
1891         from = binder_get_txn_from(t);
1892         if (!from)
1893                 return NULL;
1894         binder_inner_proc_lock(from->proc);
1895         if (t->from) {
1896                 BUG_ON(from != t->from);
1897                 return from;
1898         }
1899         binder_inner_proc_unlock(from->proc);
1900         binder_thread_dec_tmpref(from);
1901         return NULL;
1902 }
1903
1904 static void binder_free_transaction(struct binder_transaction *t)
1905 {
1906         if (t->buffer)
1907                 t->buffer->transaction = NULL;
1908         kfree(t);
1909         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1910 }
1911
1912 static void binder_send_failed_reply(struct binder_transaction *t,
1913                                      uint32_t error_code)
1914 {
1915         struct binder_thread *target_thread;
1916         struct binder_transaction *next;
1917
1918         BUG_ON(t->flags & TF_ONE_WAY);
1919         while (1) {
1920                 target_thread = binder_get_txn_from_and_acq_inner(t);
1921                 if (target_thread) {
1922                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1923                                      "send failed reply for transaction %d to %d:%d\n",
1924                                       t->debug_id,
1925                                       target_thread->proc->pid,
1926                                       target_thread->pid);
1927
1928                         binder_pop_transaction_ilocked(target_thread, t);
1929                         if (target_thread->reply_error.cmd == BR_OK) {
1930                                 target_thread->reply_error.cmd = error_code;
1931                                 binder_enqueue_work_ilocked(
1932                                         &target_thread->reply_error.work,
1933                                         &target_thread->todo);
1934                                 wake_up_interruptible(&target_thread->wait);
1935                         } else {
1936                                 WARN(1, "Unexpected reply error: %u\n",
1937                                                 target_thread->reply_error.cmd);
1938                         }
1939                         binder_inner_proc_unlock(target_thread->proc);
1940                         binder_thread_dec_tmpref(target_thread);
1941                         binder_free_transaction(t);
1942                         return;
1943                 }
1944                 next = t->from_parent;
1945
1946                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1947                              "send failed reply for transaction %d, target dead\n",
1948                              t->debug_id);
1949
1950                 binder_free_transaction(t);
1951                 if (next == NULL) {
1952                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1953                                      "reply failed, no target thread at root\n");
1954                         return;
1955                 }
1956                 t = next;
1957                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1958                              "reply failed, no target thread -- retry %d\n",
1959                               t->debug_id);
1960         }
1961 }
1962
1963 /**
1964  * binder_cleanup_transaction() - cleans up undelivered transaction
1965  * @t:          transaction that needs to be cleaned up
1966  * @reason:     reason the transaction wasn't delivered
1967  * @error_code: error to return to caller (if synchronous call)
1968  */
1969 static void binder_cleanup_transaction(struct binder_transaction *t,
1970                                        const char *reason,
1971                                        uint32_t error_code)
1972 {
1973         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1974                 binder_send_failed_reply(t, error_code);
1975         } else {
1976                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1977                         "undelivered transaction %d, %s\n",
1978                         t->debug_id, reason);
1979                 binder_free_transaction(t);
1980         }
1981 }
1982
1983 /**
1984  * binder_validate_object() - checks for a valid metadata object in a buffer.
1985  * @buffer:     binder_buffer that we're parsing.
1986  * @offset:     offset in the buffer at which to validate an object.
1987  *
1988  * Return:      If there's a valid metadata object at @offset in @buffer, the
1989  *              size of that object. Otherwise, it returns zero.
1990  */
1991 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1992 {
1993         /* Check if we can read a header first */
1994         struct binder_object_header *hdr;
1995         size_t object_size = 0;
1996
1997         if (offset > buffer->data_size - sizeof(*hdr) ||
1998             buffer->data_size < sizeof(*hdr) ||
1999             !IS_ALIGNED(offset, sizeof(u32)))
2000                 return 0;
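        /*
         * The buffer->data_size < sizeof(*hdr) test guards the unsigned
         * subtraction above: e.g. with data_size == 2 the expression
         * data_size - sizeof(*hdr) would wrap to a huge value and any
         * offset would pass the first check.
         */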
2001
2002         /* Ok, now see if we can read a complete object. */
2003         hdr = (struct binder_object_header *)(buffer->data + offset);
2004         switch (hdr->type) {
2005         case BINDER_TYPE_BINDER:
2006         case BINDER_TYPE_WEAK_BINDER:
2007         case BINDER_TYPE_HANDLE:
2008         case BINDER_TYPE_WEAK_HANDLE:
2009                 object_size = sizeof(struct flat_binder_object);
2010                 break;
2011         case BINDER_TYPE_FD:
2012                 object_size = sizeof(struct binder_fd_object);
2013                 break;
2014         case BINDER_TYPE_PTR:
2015                 object_size = sizeof(struct binder_buffer_object);
2016                 break;
2017         case BINDER_TYPE_FDA:
2018                 object_size = sizeof(struct binder_fd_array_object);
2019                 break;
2020         default:
2021                 return 0;
2022         }
2023         if (offset <= buffer->data_size - object_size &&
2024             buffer->data_size >= object_size)
2025                 return object_size;
2026         else
2027                 return 0;
2028 }
2029
2030 /**
2031  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2032  * @b:          binder_buffer containing the object
2033  * @index:      index in offset array at which the binder_buffer_object is
2034  *              located
2035  * @start:      points to the start of the offset array
2036  * @num_valid:  the number of valid offsets in the offset array
2037  *
2038  * Return:      If @index is within the valid range of the offset array
2039  *              described by @start and @num_valid, and if there's a valid
2040  *              binder_buffer_object at the offset found in index @index
2041  *              of the offset array, that object is returned. Otherwise,
2042  *              %NULL is returned.
2043  *              Note that the offset found at index @index itself is not
2044  *              verified; this function assumes that @num_valid elements
2045  *              from @start were previously verified to have valid offsets.
2046  */
2047 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2048                                                         binder_size_t index,
2049                                                         binder_size_t *start,
2050                                                         binder_size_t num_valid)
2051 {
2052         struct binder_buffer_object *buffer_obj;
2053         binder_size_t *offp;
2054
2055         if (index >= num_valid)
2056                 return NULL;
2057
2058         offp = start + index;
2059         buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2060         if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2061                 return NULL;
2062
2063         return buffer_obj;
2064 }
2065
2066 /**
2067  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2068  * @b:                  transaction buffer
2069  * @objects_start:      start of objects buffer
2070  * @buffer:             binder_buffer_object in which to fix up
2071  * @offset:             start offset in @buffer to fix up
2072  * @last_obj:           last binder_buffer_object that we fixed up in
2073  * @last_min_offset:    minimum fixup offset in @last_obj
2074  *
2075  * Return:              %true if a fixup in buffer @buffer at offset @offset is
2076  *                      allowed.
2077  *
2078  * For safety reasons, we only allow fixups inside a buffer to happen
2079  * at increasing offsets; additionally, we only allow fixup on the last
2080  * buffer object that was verified, or one of its parents.
2081  *
2082  * Example of what is allowed:
2083  *
2084  * A
2085  *   B (parent = A, offset = 0)
2086  *   C (parent = A, offset = 16)
2087  *     D (parent = C, offset = 0)
2088  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2089  *
2090  * Examples of what is not allowed:
2091  *
2092  * Decreasing offsets within the same parent:
2093  * A
2094  *   C (parent = A, offset = 16)
2095  *   B (parent = A, offset = 0) // decreasing offset within A
2096  *
2097  * Referring to a parent that wasn't the last object or any of its parents:
2098  * A
2099  *   B (parent = A, offset = 0)
2100  *   C (parent = A, offset = 16)
2101  *     D (parent = B, offset = 0) // B is not C or any of C's parents
2103  */
2104 static bool binder_validate_fixup(struct binder_buffer *b,
2105                                   binder_size_t *objects_start,
2106                                   struct binder_buffer_object *buffer,
2107                                   binder_size_t fixup_offset,
2108                                   struct binder_buffer_object *last_obj,
2109                                   binder_size_t last_min_offset)
2110 {
2111         if (!last_obj) {
2112                 /* No previously verified object to fix up into */
2113                 return false;
2114         }
2115
2116         while (last_obj != buffer) {
2117                 /*
2118                  * Safe to retrieve the parent of last_obj, since it
2119                  * was already previously verified by the driver.
2120                  */
2121                 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2122                         return false;
2123                 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2124                 last_obj = (struct binder_buffer_object *)
2125                         (b->data + *(objects_start + last_obj->parent));
2126         }
2127         return (fixup_offset >= last_min_offset);
2128 }
2129
2130 static void binder_transaction_buffer_release(struct binder_proc *proc,
2131                                               struct binder_buffer *buffer,
2132                                               binder_size_t *failed_at)
2133 {
2134         binder_size_t *offp, *off_start, *off_end;
2135         int debug_id = buffer->debug_id;
2136
2137         binder_debug(BINDER_DEBUG_TRANSACTION,
2138                      "%d buffer release %d, size %zd-%zd, failed at %p\n",
2139                      proc->pid, buffer->debug_id,
2140                      buffer->data_size, buffer->offsets_size, failed_at);
2141
2142         if (buffer->target_node)
2143                 binder_dec_node(buffer->target_node, 1, 0);
2144
2145         off_start = (binder_size_t *)(buffer->data +
2146                                       ALIGN(buffer->data_size, sizeof(void *)));
2147         if (failed_at)
2148                 off_end = failed_at;
2149         else
2150                 off_end = (void *)off_start + buffer->offsets_size;
2151         for (offp = off_start; offp < off_end; offp++) {
2152                 struct binder_object_header *hdr;
2153                 size_t object_size = binder_validate_object(buffer, *offp);
2154
2155                 if (object_size == 0) {
2156                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2157                                debug_id, (u64)*offp, buffer->data_size);
2158                         continue;
2159                 }
2160                 hdr = (struct binder_object_header *)(buffer->data + *offp);
2161                 switch (hdr->type) {
2162                 case BINDER_TYPE_BINDER:
2163                 case BINDER_TYPE_WEAK_BINDER: {
2164                         struct flat_binder_object *fp;
2165                         struct binder_node *node;
2166
2167                         fp = to_flat_binder_object(hdr);
2168                         node = binder_get_node(proc, fp->binder);
2169                         if (node == NULL) {
2170                                 pr_err("transaction release %d bad node %016llx\n",
2171                                        debug_id, (u64)fp->binder);
2172                                 break;
2173                         }
2174                         binder_debug(BINDER_DEBUG_TRANSACTION,
2175                                      "        node %d u%016llx\n",
2176                                      node->debug_id, (u64)node->ptr);
2177                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2178                                         0);
2179                         binder_put_node(node);
2180                 } break;
2181                 case BINDER_TYPE_HANDLE:
2182                 case BINDER_TYPE_WEAK_HANDLE: {
2183                         struct flat_binder_object *fp;
2184                         struct binder_ref_data rdata;
2185                         int ret;
2186
2187                         fp = to_flat_binder_object(hdr);
2188                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2189                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2190
2191                         if (ret) {
2192                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2193                                  debug_id, fp->handle, ret);
2194                                 break;
2195                         }
2196                         binder_debug(BINDER_DEBUG_TRANSACTION,
2197                                      "        ref %d desc %d\n",
2198                                      rdata.debug_id, rdata.desc);
2199                 } break;
2200
2201                 case BINDER_TYPE_FD: {
2202                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2203
2204                         binder_debug(BINDER_DEBUG_TRANSACTION,
2205                                      "        fd %d\n", fp->fd);
2206                         if (failed_at)
2207                                 task_close_fd(proc, fp->fd);
2208                 } break;
2209                 case BINDER_TYPE_PTR:
2210                         /*
2211                          * Nothing to do here, this will get cleaned up when the
2212                          * transaction buffer gets freed
2213                          */
2214                         break;
2215                 case BINDER_TYPE_FDA: {
2216                         struct binder_fd_array_object *fda;
2217                         struct binder_buffer_object *parent;
2218                         uintptr_t parent_buffer;
2219                         u32 *fd_array;
2220                         size_t fd_index;
2221                         binder_size_t fd_buf_size;
2222
2223                         fda = to_binder_fd_array_object(hdr);
2224                         parent = binder_validate_ptr(buffer, fda->parent,
2225                                                      off_start,
2226                                                      offp - off_start);
2227                         if (!parent) {
2228                                 pr_err("transaction release %d bad parent offset\n",
2229                                        debug_id);
2230                                 continue;
2231                         }
2232                         /*
2233                          * Since the parent was already fixed up, convert it
2234                          * back to kernel address space to access it
2235                          */
2236                         parent_buffer = parent->buffer -
2237                                 binder_alloc_get_user_buffer_offset(
2238                                                 &proc->alloc);
2239
2240                         fd_buf_size = sizeof(u32) * fda->num_fds;
2241                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2242                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2243                                        debug_id, (u64)fda->num_fds);
2244                                 continue;
2245                         }
2246                         if (fd_buf_size > parent->length ||
2247                             fda->parent_offset > parent->length - fd_buf_size) {
2248                                 /* No space for all file descriptors here. */
2249                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2250                                        debug_id, (u64)fda->num_fds);
2251                                 continue;
2252                         }
2253                         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2254                         for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2255                                 task_close_fd(proc, fd_array[fd_index]);
2256                 } break;
2257                 default:
2258                         pr_err("transaction release %d bad object type %x\n",
2259                                 debug_id, hdr->type);
2260                         break;
2261                 }
2262         }
2263 }
2264
2265 static int binder_translate_binder(struct flat_binder_object *fp,
2266                                    struct binder_transaction *t,
2267                                    struct binder_thread *thread)
2268 {
2269         struct binder_node *node;
2270         struct binder_proc *proc = thread->proc;
2271         struct binder_proc *target_proc = t->to_proc;
2272         struct binder_ref_data rdata;
2273         int ret = 0;
2274
2275         node = binder_get_node(proc, fp->binder);
2276         if (!node) {
2277                 node = binder_new_node(proc, fp);
2278                 if (!node)
2279                         return -ENOMEM;
2280         }
2281         if (fp->cookie != node->cookie) {
2282                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2283                                   proc->pid, thread->pid, (u64)fp->binder,
2284                                   node->debug_id, (u64)fp->cookie,
2285                                   (u64)node->cookie);
2286                 ret = -EINVAL;
2287                 goto done;
2288         }
2289         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2290                 ret = -EPERM;
2291                 goto done;
2292         }
2293
2294         ret = binder_inc_ref_for_node(target_proc, node,
2295                         fp->hdr.type == BINDER_TYPE_BINDER,
2296                         &thread->todo, &rdata);
2297         if (ret)
2298                 goto done;
2299
2300         if (fp->hdr.type == BINDER_TYPE_BINDER)
2301                 fp->hdr.type = BINDER_TYPE_HANDLE;
2302         else
2303                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2304         fp->binder = 0;
2305         fp->handle = rdata.desc;
2306         fp->cookie = 0;
2307
2308         trace_binder_transaction_node_to_ref(t, node, &rdata);
2309         binder_debug(BINDER_DEBUG_TRANSACTION,
2310                      "        node %d u%016llx -> ref %d desc %d\n",
2311                      node->debug_id, (u64)node->ptr,
2312                      rdata.debug_id, rdata.desc);
2313 done:
2314         binder_put_node(node);
2315         return ret;
2316 }
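
/*
 * Translation sketch (illustrative values): an object arriving from the
 * node's owner as
 *
 *	{ hdr.type = BINDER_TYPE_BINDER, binder = <node ptr>, cookie = <cookie> }
 *
 * leaves here as
 *
 *	{ hdr.type = BINDER_TYPE_HANDLE, handle = <desc in target_proc>,
 *	  binder = 0, cookie = 0 }
 *
 * with the backing ref in target_proc created on demand by
 * binder_inc_ref_for_node().
 */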
2317
2318 static int binder_translate_handle(struct flat_binder_object *fp,
2319                                    struct binder_transaction *t,
2320                                    struct binder_thread *thread)
2321 {
2322         struct binder_proc *proc = thread->proc;
2323         struct binder_proc *target_proc = t->to_proc;
2324         struct binder_node *node;
2325         struct binder_ref_data src_rdata;
2326         int ret = 0;
2327
2328         node = binder_get_node_from_ref(proc, fp->handle,
2329                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2330         if (!node) {
2331                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2332                                   proc->pid, thread->pid, fp->handle);
2333                 return -EINVAL;
2334         }
2335         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2336                 ret = -EPERM;
2337                 goto done;
2338         }
2339
2340         binder_node_lock(node);
2341         if (node->proc == target_proc) {
2342                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2343                         fp->hdr.type = BINDER_TYPE_BINDER;
2344                 else
2345                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2346                 fp->binder = node->ptr;
2347                 fp->cookie = node->cookie;
2348                 if (node->proc)
2349                         binder_inner_proc_lock(node->proc);
2350                 binder_inc_node_nilocked(node,
2351                                          fp->hdr.type == BINDER_TYPE_BINDER,
2352                                          0, NULL);
2353                 if (node->proc)
2354                         binder_inner_proc_unlock(node->proc);
2355                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2356                 binder_debug(BINDER_DEBUG_TRANSACTION,
2357                              "        ref %d desc %d -> node %d u%016llx\n",
2358                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2359                              (u64)node->ptr);
2360                 binder_node_unlock(node);
2361         } else {
2362                 struct binder_ref_data dest_rdata;
2363
2364                 binder_node_unlock(node);
2365                 ret = binder_inc_ref_for_node(target_proc, node,
2366                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2367                                 NULL, &dest_rdata);
2368                 if (ret)
2369                         goto done;
2370
2371                 fp->binder = 0;
2372                 fp->handle = dest_rdata.desc;
2373                 fp->cookie = 0;
2374                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2375                                                     &dest_rdata);
2376                 binder_debug(BINDER_DEBUG_TRANSACTION,
2377                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2378                              src_rdata.debug_id, src_rdata.desc,
2379                              dest_rdata.debug_id, dest_rdata.desc,
2380                              node->debug_id);
2381         }
2382 done:
2383         binder_put_node(node);
2384         return ret;
2385 }
2386
2387 static int binder_translate_fd(int fd,
2388                                struct binder_transaction *t,
2389                                struct binder_thread *thread,
2390                                struct binder_transaction *in_reply_to)
2391 {
2392         struct binder_proc *proc = thread->proc;
2393         struct binder_proc *target_proc = t->to_proc;
2394         int target_fd;
2395         struct file *file;
2396         int ret;
2397         bool target_allows_fd;
2398
2399         if (in_reply_to)
2400                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2401         else
2402                 target_allows_fd = t->buffer->target_node->accept_fds;
2403         if (!target_allows_fd) {
2404                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2405                                   proc->pid, thread->pid,
2406                                   in_reply_to ? "reply" : "transaction",
2407                                   fd);
2408                 ret = -EPERM;
2409                 goto err_fd_not_accepted;
2410         }
2411
2412         file = fget(fd);
2413         if (!file) {
2414                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2415                                   proc->pid, thread->pid, fd);
2416                 ret = -EBADF;
2417                 goto err_fget;
2418         }
2419         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2420         if (ret < 0) {
2421                 ret = -EPERM;
2422                 goto err_security;
2423         }
2424
2425         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2426         if (target_fd < 0) {
2427                 ret = -ENOMEM;
2428                 goto err_get_unused_fd;
2429         }
2430         task_fd_install(target_proc, target_fd, file);
2431         trace_binder_transaction_fd(t, fd, target_fd);
2432         binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
2433                      fd, target_fd);
2434
2435         return target_fd;
2436
2437 err_get_unused_fd:
2438 err_security:
2439         fput(file);
2440 err_fget:
2441 err_fd_not_accepted:
2442         return ret;
2443 }
2444
2445 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2446                                      struct binder_buffer_object *parent,
2447                                      struct binder_transaction *t,
2448                                      struct binder_thread *thread,
2449                                      struct binder_transaction *in_reply_to)
2450 {
2451         binder_size_t fdi, fd_buf_size, num_installed_fds;
2452         int target_fd;
2453         uintptr_t parent_buffer;
2454         u32 *fd_array;
2455         struct binder_proc *proc = thread->proc;
2456         struct binder_proc *target_proc = t->to_proc;
2457
2458         fd_buf_size = sizeof(u32) * fda->num_fds;
2459         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2460                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2461                                   proc->pid, thread->pid, (u64)fda->num_fds);
2462                 return -EINVAL;
2463         }
2464         if (fd_buf_size > parent->length ||
2465             fda->parent_offset > parent->length - fd_buf_size) {
2466                 /* No space for all file descriptors here. */
2467                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2468                                   proc->pid, thread->pid, (u64)fda->num_fds);
2469                 return -EINVAL;
2470         }
2471         /*
2472          * Since the parent was already fixed up, convert it
2473          * back to the kernel address space to access it
2474          */
2475         parent_buffer = parent->buffer -
2476                 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2477         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2478         if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2479                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2480                                   proc->pid, thread->pid);
2481                 return -EINVAL;
2482         }
2483         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2484                 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2485                                                 in_reply_to);
2486                 if (target_fd < 0)
2487                         goto err_translate_fd_failed;
2488                 fd_array[fdi] = target_fd;
2489         }
2490         return 0;
2491
2492 err_translate_fd_failed:
2493         /*
2494          * Failed to allocate fd or security error, free fds
2495          * installed so far.
2496          */
2497         num_installed_fds = fdi;
2498         for (fdi = 0; fdi < num_installed_fds; fdi++)
2499                 task_close_fd(target_proc, fd_array[fdi]);
2500         return target_fd;
2501 }
2502
2503 static int binder_fixup_parent(struct binder_transaction *t,
2504                                struct binder_thread *thread,
2505                                struct binder_buffer_object *bp,
2506                                binder_size_t *off_start,
2507                                binder_size_t num_valid,
2508                                struct binder_buffer_object *last_fixup_obj,
2509                                binder_size_t last_fixup_min_off)
2510 {
2511         struct binder_buffer_object *parent;
2512         u8 *parent_buffer;
2513         struct binder_buffer *b = t->buffer;
2514         struct binder_proc *proc = thread->proc;
2515         struct binder_proc *target_proc = t->to_proc;
2516
2517         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2518                 return 0;
2519
2520         parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2521         if (!parent) {
2522                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2523                                   proc->pid, thread->pid);
2524                 return -EINVAL;
2525         }
2526
2527         if (!binder_validate_fixup(b, off_start,
2528                                    parent, bp->parent_offset,
2529                                    last_fixup_obj,
2530                                    last_fixup_min_off)) {
2531                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2532                                   proc->pid, thread->pid);
2533                 return -EINVAL;
2534         }
2535
2536         if (parent->length < sizeof(binder_uintptr_t) ||
2537             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2538                 /* No space for a pointer here! */
2539                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2540                                   proc->pid, thread->pid);
2541                 return -EINVAL;
2542         }
2543         parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2544                         binder_alloc_get_user_buffer_offset(
2545                                 &target_proc->alloc));
2546         *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2547
2548         return 0;
2549 }
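
/*
 * Fixup sketch (illustrative values): for a child buffer object bp with
 * bp->parent naming buffer object P and bp->parent_offset == 16, the
 * store above writes bp->buffer (the child's fixed-up user address in
 * the target) at byte 16 inside P's payload, so the pointer embedded in
 * P is valid in the target process.
 */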
2550
2551 /**
2552  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2553  * @t:          transaction to send
2554  * @proc:       process to send the transaction to
2555  * @thread:     thread in @proc to send the transaction to (may be NULL)
2556  *
2557  * This function queues a transaction to the specified process. It will try
2558  * to find a thread in the target process to handle the transaction and
2559  * wake it up. If no thread is found, the work is queued to the proc
2560  * waitqueue.
2561  *
2562  * If the @thread parameter is not NULL, the transaction is always queued
2563  * to the todo list of that specific thread.
2564  *
2565  * Return:      true if the transaction was successfully queued
2566  *              false if the target process or thread is dead
2567  */
2568 static bool binder_proc_transaction(struct binder_transaction *t,
2569                                     struct binder_proc *proc,
2570                                     struct binder_thread *thread)
2571 {
2572         struct list_head *target_list = NULL;
2573         struct binder_node *node = t->buffer->target_node;
2574         bool oneway = !!(t->flags & TF_ONE_WAY);
2575         bool wakeup = true;
2576
2577         BUG_ON(!node);
2578         binder_node_lock(node);
2579         if (oneway) {
2580                 BUG_ON(thread);
2581                 if (node->has_async_transaction) {
2582                         target_list = &node->async_todo;
2583                         wakeup = false;
2584                 } else {
2585                         node->has_async_transaction = 1;
2586                 }
2587         }
2588
2589         binder_inner_proc_lock(proc);
2590
2591         if (proc->is_dead || (thread && thread->is_dead)) {
2592                 binder_inner_proc_unlock(proc);
2593                 binder_node_unlock(node);
2594                 return false;
2595         }
2596
2597         if (!thread && !target_list)
2598                 thread = binder_select_thread_ilocked(proc);
2599
2600         if (thread)
2601                 target_list = &thread->todo;
2602         else if (!target_list)
2603                 target_list = &proc->todo;
2604         else
2605                 BUG_ON(target_list != &node->async_todo);
2606
2607         binder_enqueue_work_ilocked(&t->work, target_list);
2608
2609         if (wakeup)
2610                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2611
2612         binder_inner_proc_unlock(proc);
2613         binder_node_unlock(node);
2614
2615         return true;
2616 }
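
/*
 * Queueing summary (sketch of the selection above, all under
 * proc->inner_lock):
 *
 *	thread->todo      if a thread was passed in or selected
 *	proc->todo        if no thread is available
 *	node->async_todo  for a oneway call while a previous async
 *	                  transaction on the node is still in flight
 *	                  (in which case no wakeup is performed)
 */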
2617
2618 /**
2619  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2620  * @node:         struct binder_node for which to get refs
2621  * @procp:        returns @node->proc if valid
2622  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2623  *
2624  * User-space normally keeps the node alive when creating a transaction
2625  * since it has a reference to the target. The local strong ref keeps it
2626  * alive if the sending process dies before the target process processes
2627  * the transaction. If the source process is malicious or has a reference
2628  * counting bug, relying on the local strong ref can fail.
2629  *
2630  * Since user-space can cause the local strong ref to go away, we also take
2631  * a tmpref on the node to ensure it survives while we are constructing
2632  * the transaction. We also need a tmpref on the proc while we are
2633  * constructing the transaction, so we take that here as well.
2634  *
2635  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2636  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2637  * target proc has died, @error is set to BR_DEAD_REPLY.
2638  */
2639 static struct binder_node *binder_get_node_refs_for_txn(
2640                 struct binder_node *node,
2641                 struct binder_proc **procp,
2642                 uint32_t *error)
2643 {
2644         struct binder_node *target_node = NULL;
2645
2646         binder_node_inner_lock(node);
2647         if (node->proc) {
2648                 target_node = node;
2649                 binder_inc_node_nilocked(node, 1, 0, NULL);
2650                 binder_inc_node_tmpref_ilocked(node);
2651                 node->proc->tmp_ref++;
2652                 *procp = node->proc;
2653         } else
2654                 *error = BR_DEAD_REPLY;
2655         binder_node_inner_unlock(node);
2656
2657         return target_node;
2658 }
2659
2660 static void binder_transaction(struct binder_proc *proc,
2661                                struct binder_thread *thread,
2662                                struct binder_transaction_data *tr, int reply,
2663                                binder_size_t extra_buffers_size)
2664 {
2665         int ret;
2666         struct binder_transaction *t;
2667         struct binder_work *tcomplete;
2668         binder_size_t *offp, *off_end, *off_start;
2669         binder_size_t off_min;
2670         u8 *sg_bufp, *sg_buf_end;
2671         struct binder_proc *target_proc = NULL;
2672         struct binder_thread *target_thread = NULL;
2673         struct binder_node *target_node = NULL;
2674         struct binder_transaction *in_reply_to = NULL;
2675         struct binder_transaction_log_entry *e;
2676         uint32_t return_error = 0;
2677         uint32_t return_error_param = 0;
2678         uint32_t return_error_line = 0;
2679         struct binder_buffer_object *last_fixup_obj = NULL;
2680         binder_size_t last_fixup_min_off = 0;
2681         struct binder_context *context = proc->context;
2682         int t_debug_id = atomic_inc_return(&binder_last_id);
2683
2684         e = binder_transaction_log_add(&binder_transaction_log);
2685         e->debug_id = t_debug_id;
2686         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2687         e->from_proc = proc->pid;
2688         e->from_thread = thread->pid;
2689         e->target_handle = tr->target.handle;
2690         e->data_size = tr->data_size;
2691         e->offsets_size = tr->offsets_size;
2692         e->context_name = proc->context->name;
2693
2694         if (reply) {
2695                 binder_inner_proc_lock(proc);
2696                 in_reply_to = thread->transaction_stack;
2697                 if (in_reply_to == NULL) {
2698                         binder_inner_proc_unlock(proc);
2699                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2700                                           proc->pid, thread->pid);
2701                         return_error = BR_FAILED_REPLY;
2702                         return_error_param = -EPROTO;
2703                         return_error_line = __LINE__;
2704                         goto err_empty_call_stack;
2705                 }
2706                 if (in_reply_to->to_thread != thread) {
2707                         spin_lock(&in_reply_to->lock);
2708                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2709                                 proc->pid, thread->pid, in_reply_to->debug_id,
2710                                 in_reply_to->to_proc ?
2711                                 in_reply_to->to_proc->pid : 0,
2712                                 in_reply_to->to_thread ?
2713                                 in_reply_to->to_thread->pid : 0);
2714                         spin_unlock(&in_reply_to->lock);
2715                         binder_inner_proc_unlock(proc);
2716                         return_error = BR_FAILED_REPLY;
2717                         return_error_param = -EPROTO;
2718                         return_error_line = __LINE__;
2719                         in_reply_to = NULL;
2720                         goto err_bad_call_stack;
2721                 }
2722                 thread->transaction_stack = in_reply_to->to_parent;
2723                 binder_inner_proc_unlock(proc);
2724                 binder_set_nice(in_reply_to->saved_priority);
2725                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2726                 if (target_thread == NULL) {
2727                         return_error = BR_DEAD_REPLY;
2728                         return_error_line = __LINE__;
2729                         goto err_dead_binder;
2730                 }
2731                 if (target_thread->transaction_stack != in_reply_to) {
2732                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2733                                 proc->pid, thread->pid,
2734                                 target_thread->transaction_stack ?
2735                                 target_thread->transaction_stack->debug_id : 0,
2736                                 in_reply_to->debug_id);
2737                         binder_inner_proc_unlock(target_thread->proc);
2738                         return_error = BR_FAILED_REPLY;
2739                         return_error_param = -EPROTO;
2740                         return_error_line = __LINE__;
2741                         in_reply_to = NULL;
2742                         target_thread = NULL;
2743                         goto err_dead_binder;
2744                 }
2745                 target_proc = target_thread->proc;
2746                 target_proc->tmp_ref++;
2747                 binder_inner_proc_unlock(target_thread->proc);
2748         } else {
2749                 if (tr->target.handle) {
2750                         struct binder_ref *ref;
2751
2752                          /*
2753                          * There must already be a strong ref
2754                          * on this node; take an additional
2755                          * strong increment so the node is
2756                          * guaranteed to stay alive until the
2757                          * transaction is done.
2758                          */
2759                         binder_proc_lock(proc);
2760                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2761                                                      true);
2762                         if (ref) {
2763                                 target_node = binder_get_node_refs_for_txn(
2764                                                 ref->node, &target_proc,
2765                                                 &return_error);
2766                         } else {
2767                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2768                                                   proc->pid, thread->pid);
2769                                 return_error = BR_FAILED_REPLY;
2770                         }
2771                         binder_proc_unlock(proc);
2772                 } else {
2773                         mutex_lock(&context->context_mgr_node_lock);
2774                         target_node = context->binder_context_mgr_node;
2775                         if (target_node)
2776                                 target_node = binder_get_node_refs_for_txn(
2777                                                 target_node, &target_proc,
2778                                                 &return_error);
2779                         else
2780                                 return_error = BR_DEAD_REPLY;
2781                         mutex_unlock(&context->context_mgr_node_lock);
2782                 }
2783                 if (!target_node) {
2784                         /*
2785                          * return_error is set above
2786                          */
2787                         return_error_param = -EINVAL;
2788                         return_error_line = __LINE__;
2789                         goto err_dead_binder;
2790                 }
2791                 e->to_node = target_node->debug_id;
2792                 if (security_binder_transaction(proc->tsk,
2793                                                 target_proc->tsk) < 0) {
2794                         return_error = BR_FAILED_REPLY;
2795                         return_error_param = -EPERM;
2796                         return_error_line = __LINE__;
2797                         goto err_invalid_target_handle;
2798                 }
2799                 binder_inner_proc_lock(proc);
2800                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2801                         struct binder_transaction *tmp;
2802
2803                         tmp = thread->transaction_stack;
2804                         if (tmp->to_thread != thread) {
2805                                 spin_lock(&tmp->lock);
2806                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2807                                         proc->pid, thread->pid, tmp->debug_id,
2808                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2809                                         tmp->to_thread ?
2810                                         tmp->to_thread->pid : 0);
2811                                 spin_unlock(&tmp->lock);
2812                                 binder_inner_proc_unlock(proc);
2813                                 return_error = BR_FAILED_REPLY;
2814                                 return_error_param = -EPROTO;
2815                                 return_error_line = __LINE__;
2816                                 goto err_bad_call_stack;
2817                         }
2818                         while (tmp) {
2819                                 struct binder_thread *from;
2820
2821                                 spin_lock(&tmp->lock);
2822                                 from = tmp->from;
2823                                 if (from && from->proc == target_proc) {
2824                                         atomic_inc(&from->tmp_ref);
2825                                         target_thread = from;
2826                                         spin_unlock(&tmp->lock);
2827                                         break;
2828                                 }
2829                                 spin_unlock(&tmp->lock);
2830                                 tmp = tmp->from_parent;
2831                         }
2832                 }
2833                 binder_inner_proc_unlock(proc);
2834         }
2835         if (target_thread)
2836                 e->to_thread = target_thread->pid;
2837         e->to_proc = target_proc->pid;
2838
2839         /* TODO: reuse incoming transaction for reply */
2840         t = kzalloc(sizeof(*t), GFP_KERNEL);
2841         if (t == NULL) {
2842                 return_error = BR_FAILED_REPLY;
2843                 return_error_param = -ENOMEM;
2844                 return_error_line = __LINE__;
2845                 goto err_alloc_t_failed;
2846         }
2847         binder_stats_created(BINDER_STAT_TRANSACTION);
2848         spin_lock_init(&t->lock);
2849
2850         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2851         if (tcomplete == NULL) {
2852                 return_error = BR_FAILED_REPLY;
2853                 return_error_param = -ENOMEM;
2854                 return_error_line = __LINE__;
2855                 goto err_alloc_tcomplete_failed;
2856         }
2857         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2858
2859         t->debug_id = t_debug_id;
2860
2861         if (reply)
2862                 binder_debug(BINDER_DEBUG_TRANSACTION,
2863                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2864                              proc->pid, thread->pid, t->debug_id,
2865                              target_proc->pid, target_thread->pid,
2866                              (u64)tr->data.ptr.buffer,
2867                              (u64)tr->data.ptr.offsets,
2868                              (u64)tr->data_size, (u64)tr->offsets_size,
2869                              (u64)extra_buffers_size);
2870         else
2871                 binder_debug(BINDER_DEBUG_TRANSACTION,
2872                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2873                              proc->pid, thread->pid, t->debug_id,
2874                              target_proc->pid, target_node->debug_id,
2875                              (u64)tr->data.ptr.buffer,
2876                              (u64)tr->data.ptr.offsets,
2877                              (u64)tr->data_size, (u64)tr->offsets_size,
2878                              (u64)extra_buffers_size);
2879
2880         if (!reply && !(tr->flags & TF_ONE_WAY))
2881                 t->from = thread;
2882         else
2883                 t->from = NULL;
2884         t->sender_euid = task_euid(proc->tsk);
2885         t->to_proc = target_proc;
2886         t->to_thread = target_thread;
2887         t->code = tr->code;
2888         t->flags = tr->flags;
2889         t->priority = task_nice(current);
2890
2891         trace_binder_transaction(reply, t, target_node);
2892
2893         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2894                 tr->offsets_size, extra_buffers_size,
2895                 !reply && (t->flags & TF_ONE_WAY));
2896         if (IS_ERR(t->buffer)) {
2897                 /*
2898                  * -ESRCH indicates VMA cleared. The target is dying.
2899                  */
2900                 return_error_param = PTR_ERR(t->buffer);
2901                 return_error = return_error_param == -ESRCH ?
2902                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2903                 return_error_line = __LINE__;
2904                 t->buffer = NULL;
2905                 goto err_binder_alloc_buf_failed;
2906         }
2907         t->buffer->allow_user_free = 0;
2908         t->buffer->debug_id = t->debug_id;
2909         t->buffer->transaction = t;
2910         t->buffer->target_node = target_node;
2911         trace_binder_transaction_alloc_buf(t->buffer);
2912         off_start = (binder_size_t *)(t->buffer->data +
2913                                       ALIGN(tr->data_size, sizeof(void *)));
2914         offp = off_start;
2915
2916         if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2917                            tr->data.ptr.buffer, tr->data_size)) {
2918                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2919                                 proc->pid, thread->pid);
2920                 return_error = BR_FAILED_REPLY;
2921                 return_error_param = -EFAULT;
2922                 return_error_line = __LINE__;
2923                 goto err_copy_data_failed;
2924         }
2925         if (copy_from_user(offp, (const void __user *)(uintptr_t)
2926                            tr->data.ptr.offsets, tr->offsets_size)) {
2927                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2928                                 proc->pid, thread->pid);
2929                 return_error = BR_FAILED_REPLY;
2930                 return_error_param = -EFAULT;
2931                 return_error_line = __LINE__;
2932                 goto err_copy_data_failed;
2933         }
2934         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2935                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2936                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2937                 return_error = BR_FAILED_REPLY;
2938                 return_error_param = -EINVAL;
2939                 return_error_line = __LINE__;
2940                 goto err_bad_offset;
2941         }
2942         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2943                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2944                                   proc->pid, thread->pid,
2945                                   (u64)extra_buffers_size);
2946                 return_error = BR_FAILED_REPLY;
2947                 return_error_param = -EINVAL;
2948                 return_error_line = __LINE__;
2949                 goto err_bad_offset;
2950         }
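        /*
         * Buffer layout: raw data, then the offsets array (pointer
         * aligned, via off_start computed above), then the scatter-gather
         * area used by BINDER_TYPE_PTR objects.
         */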
2951         off_end = (void *)off_start + tr->offsets_size;
2952         sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2953         sg_buf_end = sg_bufp + extra_buffers_size;
2954         off_min = 0;
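        /*
         * Walk the offsets array; objects must appear at strictly
         * increasing, non-overlapping offsets, which the off_min check
         * in the loop enforces.
         */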
2955         for (; offp < off_end; offp++) {
2956                 struct binder_object_header *hdr;
2957                 size_t object_size = binder_validate_object(t->buffer, *offp);
2958
2959                 if (object_size == 0 || *offp < off_min) {
2960                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2961                                           proc->pid, thread->pid, (u64)*offp,
2962                                           (u64)off_min,
2963                                           (u64)t->buffer->data_size);
2964                         return_error = BR_FAILED_REPLY;
2965                         return_error_param = -EINVAL;
2966                         return_error_line = __LINE__;
2967                         goto err_bad_offset;
2968                 }
2969
2970                 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2971                 off_min = *offp + object_size;
2972                 switch (hdr->type) {
2973                 case BINDER_TYPE_BINDER:
2974                 case BINDER_TYPE_WEAK_BINDER: {
2975                         struct flat_binder_object *fp;
2976
2977                         fp = to_flat_binder_object(hdr);
2978                         ret = binder_translate_binder(fp, t, thread);
2979                         if (ret < 0) {
2980                                 return_error = BR_FAILED_REPLY;
2981                                 return_error_param = ret;
2982                                 return_error_line = __LINE__;
2983                                 goto err_translate_failed;
2984                         }
2985                 } break;
2986                 case BINDER_TYPE_HANDLE:
2987                 case BINDER_TYPE_WEAK_HANDLE: {
2988                         struct flat_binder_object *fp;
2989
2990                         fp = to_flat_binder_object(hdr);
2991                         ret = binder_translate_handle(fp, t, thread);
2992                         if (ret < 0) {
2993                                 return_error = BR_FAILED_REPLY;
2994                                 return_error_param = ret;
2995                                 return_error_line = __LINE__;
2996                                 goto err_translate_failed;
2997                         }
2998                 } break;
2999
3000                 case BINDER_TYPE_FD: {
3001                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
3002                         int target_fd = binder_translate_fd(fp->fd, t, thread,
3003                                                             in_reply_to);
3004
3005                         if (target_fd < 0) {
3006                                 return_error = BR_FAILED_REPLY;
3007                                 return_error_param = target_fd;
3008                                 return_error_line = __LINE__;
3009                                 goto err_translate_failed;
3010                         }
3011                         fp->pad_binder = 0;
3012                         fp->fd = target_fd;
3013                 } break;
3014                 case BINDER_TYPE_FDA: {
3015                         struct binder_fd_array_object *fda =
3016                                 to_binder_fd_array_object(hdr);
3017                         struct binder_buffer_object *parent =
3018                                 binder_validate_ptr(t->buffer, fda->parent,
3019                                                     off_start,
3020                                                     offp - off_start);
3021                         if (!parent) {
3022                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3023                                                   proc->pid, thread->pid);
3024                                 return_error = BR_FAILED_REPLY;
3025                                 return_error_param = -EINVAL;
3026                                 return_error_line = __LINE__;
3027                                 goto err_bad_parent;
3028                         }
3029                         if (!binder_validate_fixup(t->buffer, off_start,
3030                                                    parent, fda->parent_offset,
3031                                                    last_fixup_obj,
3032                                                    last_fixup_min_off)) {
3033                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3034                                                   proc->pid, thread->pid);
3035                                 return_error = BR_FAILED_REPLY;
3036                                 return_error_param = -EINVAL;
3037                                 return_error_line = __LINE__;
3038                                 goto err_bad_parent;
3039                         }
3040                         ret = binder_translate_fd_array(fda, parent, t, thread,
3041                                                         in_reply_to);
3042                         if (ret < 0) {
3043                                 return_error = BR_FAILED_REPLY;
3044                                 return_error_param = ret;
3045                                 return_error_line = __LINE__;
3046                                 goto err_translate_failed;
3047                         }
3048                         last_fixup_obj = parent;
3049                         last_fixup_min_off =
3050                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3051                 } break;
3052                 case BINDER_TYPE_PTR: {
3053                         struct binder_buffer_object *bp =
3054                                 to_binder_buffer_object(hdr);
3055                         size_t buf_left = sg_buf_end - sg_bufp;
3056
3057                         if (bp->length > buf_left) {
3058                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3059                                                   proc->pid, thread->pid);
3060                                 return_error = BR_FAILED_REPLY;
3061                                 return_error_param = -EINVAL;
3062                                 return_error_line = __LINE__;
3063                                 goto err_bad_offset;
3064                         }
3065                         if (copy_from_user(sg_bufp,
3066                                            (const void __user *)(uintptr_t)
3067                                            bp->buffer, bp->length)) {
3068                                 binder_user_error("%d:%d got transaction with invalid buffer ptr\n",
3069                                                   proc->pid, thread->pid);
3070                                 return_error_param = -EFAULT;
3071                                 return_error = BR_FAILED_REPLY;
3072                                 return_error_line = __LINE__;
3073                                 goto err_copy_data_failed;
3074                         }
3075                         /* Fixup buffer pointer to target proc address space */
3076                         bp->buffer = (uintptr_t)sg_bufp +
3077                                 binder_alloc_get_user_buffer_offset(
3078                                                 &target_proc->alloc);
3079                         sg_bufp += ALIGN(bp->length, sizeof(u64));
3080
3081                         ret = binder_fixup_parent(t, thread, bp, off_start,
3082                                                   offp - off_start,
3083                                                   last_fixup_obj,
3084                                                   last_fixup_min_off);
3085                         if (ret < 0) {
3086                                 return_error = BR_FAILED_REPLY;
3087                                 return_error_param = ret;
3088                                 return_error_line = __LINE__;
3089                                 goto err_translate_failed;
3090                         }
3091                         last_fixup_obj = bp;
3092                         last_fixup_min_off = 0;
3093                 } break;
3094                 default:
3095                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3096                                 proc->pid, thread->pid, hdr->type);
3097                         return_error = BR_FAILED_REPLY;
3098                         return_error_param = -EINVAL;
3099                         return_error_line = __LINE__;
3100                         goto err_bad_object_type;
3101                 }
3102         }
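        /*
         * All objects translated. Queue a TRANSACTION_COMPLETE for the
         * sender, then deliver: a reply is queued on the waiting target
         * thread, a synchronous call is first pushed on this thread's
         * transaction stack, and a one-way call is handed to the target
         * process with no reply thread recorded.
         */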
3103         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3104         binder_enqueue_work(proc, tcomplete, &thread->todo);
3105         t->work.type = BINDER_WORK_TRANSACTION;
3106
3107         if (reply) {
3108                 binder_inner_proc_lock(target_proc);
3109                 if (target_thread->is_dead) {
3110                         binder_inner_proc_unlock(target_proc);
3111                         goto err_dead_proc_or_thread;
3112                 }
3113                 BUG_ON(t->buffer->async_transaction != 0);
3114                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3115                 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3116                 binder_inner_proc_unlock(target_proc);
3117                 wake_up_interruptible_sync(&target_thread->wait);
3118                 binder_free_transaction(in_reply_to);
3119         } else if (!(t->flags & TF_ONE_WAY)) {
3120                 BUG_ON(t->buffer->async_transaction != 0);
3121                 binder_inner_proc_lock(proc);
3122                 t->need_reply = 1;
3123                 t->from_parent = thread->transaction_stack;
3124                 thread->transaction_stack = t;
3125                 binder_inner_proc_unlock(proc);
3126                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3127                         binder_inner_proc_lock(proc);
3128                         binder_pop_transaction_ilocked(thread, t);
3129                         binder_inner_proc_unlock(proc);
3130                         goto err_dead_proc_or_thread;
3131                 }
3132         } else {
3133                 BUG_ON(target_node == NULL);
3134                 BUG_ON(t->buffer->async_transaction != 1);
3135                 if (!binder_proc_transaction(t, target_proc, NULL))
3136                         goto err_dead_proc_or_thread;
3137         }
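        /* Drop the temporary references taken while resolving the target. */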
3138         if (target_thread)
3139                 binder_thread_dec_tmpref(target_thread);
3140         binder_proc_dec_tmpref(target_proc);
3141         if (target_node)
3142                 binder_dec_node_tmpref(target_node);
3143         /*
3144          * write barrier: make the log entry initialization above
3145          * visible before debug_id_done marks the entry complete
3146          */
3147         smp_wmb();
3148         WRITE_ONCE(e->debug_id_done, t_debug_id);
3149         return;
3150
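        /*
         * Error unwind: the labels below fall through in reverse order of
         * setup, so a failure at any step releases exactly the state that
         * was established before it.
         */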
3151 err_dead_proc_or_thread:
3152         return_error = BR_DEAD_REPLY;
3153         return_error_line = __LINE__;
3154         binder_dequeue_work(proc, tcomplete);
3155 err_translate_failed:
3156 err_bad_object_type:
3157 err_bad_offset:
3158 err_bad_parent:
3159 err_copy_data_failed:
3160         trace_binder_transaction_failed_buffer_release(t->buffer);
3161         binder_transaction_buffer_release(target_proc, t->buffer, offp);
3162         if (target_node)
3163                 binder_dec_node_tmpref(target_node);
3164         target_node = NULL;
3165         t->buffer->transaction = NULL;
3166         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3167 err_binder_alloc_buf_failed:
3168         kfree(tcomplete);
3169         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3170 err_alloc_tcomplete_failed:
3171         kfree(t);
3172         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3173 err_alloc_t_failed:
3174 err_bad_call_stack:
3175 err_empty_call_stack:
3176 err_dead_binder:
3177 err_invalid_target_handle:
3178         if (target_thread)
3179                 binder_thread_dec_tmpref(target_thread);
3180         if (target_proc)
3181                 binder_proc_dec_tmpref(target_proc);
3182         if (target_node) {
3183                 binder_dec_node(target_node, 1, 0);
3184                 binder_dec_node_tmpref(target_node);
3185         }
3186
3187         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3188                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3189                      proc->pid, thread->pid, return_error, return_error_param,
3190                      (u64)tr->data_size, (u64)tr->offsets_size,
3191                      return_error_line);
3192
3193         {
3194                 struct binder_transaction_log_entry *fe;
3195
3196                 e->return_error = return_error;
3197                 e->return_error_param = return_error_param;
3198                 e->return_error_line = return_error_line;
3199                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3200                 *fe = *e;
3201                 /*
3202                  * write barrier: make the log entry initialization above
3203                  * visible before debug_id_done marks the entries complete
3204                  */
3205                 smp_wmb();
3206                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3207                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3208         }
3209
3210         BUG_ON(thread->return_error.cmd != BR_OK);
3211         if (in_reply_to) {
3212                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3213                 binder_enqueue_work(thread->proc,
3214                                     &thread->return_error.work,
3215                                     &thread->todo);
3216                 binder_send_failed_reply(in_reply_to, return_error);
3217         } else {
3218                 thread->return_error.cmd = return_error;
3219                 binder_enqueue_work(thread->proc,
3220                                     &thread->return_error.work,
3221                                     &thread->todo);
3222         }
3223 }
3224
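/*
 * binder_thread_write() - consume BC_* commands from the write buffer
 *
 * The buffer is a packed stream: a u32 BC_* code immediately followed
 * by that command's payload, repeated. For example (illustrative), a
 * caller acquiring a handle and then sending a transaction writes:
 *
 *   u32 cmd = BC_ACQUIRE;      u32 handle;
 *   u32 cmd = BC_TRANSACTION;  struct binder_transaction_data tr;
 *
 * The number of bytes processed is reported back through @consumed so
 * userspace can tell how far parsing got.
 */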
3225 static int binder_thread_write(struct binder_proc *proc,
3226                         struct binder_thread *thread,
3227                         binder_uintptr_t binder_buffer, size_t size,
3228                         binder_size_t *consumed)
3229 {
3230         uint32_t cmd;
3231         struct binder_context *context = proc->context;
3232         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3233         void __user *ptr = buffer + *consumed;
3234         void __user *end = buffer + size;
3235
3236         while (ptr < end && thread->return_error.cmd == BR_OK) {
3237                 int ret;
3238
3239                 if (get_user(cmd, (uint32_t __user *)ptr))
3240                         return -EFAULT;
3241                 ptr += sizeof(uint32_t);
3242                 trace_binder_command(cmd);
3243                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3244                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3245                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3246                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3247                 }
3248                 switch (cmd) {
3249                 case BC_INCREFS:
3250                 case BC_ACQUIRE:
3251                 case BC_RELEASE:
3252                 case BC_DECREFS: {
3253                         uint32_t target;
3254                         const char *debug_string;
3255                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3256                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3257                         struct binder_ref_data rdata;
3258
3259                         if (get_user(target, (uint32_t __user *)ptr))
3260                                 return -EFAULT;
3261
3262                         ptr += sizeof(uint32_t);
3263                         ret = -1;
3264                         if (increment && !target) {
3265                                 struct binder_node *ctx_mgr_node;
3266                                 mutex_lock(&context->context_mgr_node_lock);
3267                                 ctx_mgr_node = context->binder_context_mgr_node;
3268                                 if (ctx_mgr_node)
3269                                         ret = binder_inc_ref_for_node(
3270                                                         proc, ctx_mgr_node,
3271                                                         strong, NULL, &rdata);
3272                                 mutex_unlock(&context->context_mgr_node_lock);
3273                         }
3274                         if (ret)
3275                                 ret = binder_update_ref_for_handle(
3276                                                 proc, target, increment, strong,
3277                                                 &rdata);
3278                         if (!ret && rdata.desc != target) {
3279                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3280                                         proc->pid, thread->pid,
3281                                         target, rdata.desc);
3282                         }
3283                         switch (cmd) {
3284                         case BC_INCREFS:
3285                                 debug_string = "IncRefs";
3286                                 break;
3287                         case BC_ACQUIRE:
3288                                 debug_string = "Acquire";
3289                                 break;
3290                         case BC_RELEASE:
3291                                 debug_string = "Release";
3292                                 break;
3293                         case BC_DECREFS:
3294                         default:
3295                                 debug_string = "DecRefs";
3296                                 break;
3297                         }
3298                         if (ret) {
3299                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3300                                         proc->pid, thread->pid, debug_string,
3301                                         strong, target, ret);
3302                                 break;
3303                         }
3304                         binder_debug(BINDER_DEBUG_USER_REFS,
3305                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3306                                      proc->pid, thread->pid, debug_string,
3307                                      rdata.debug_id, rdata.desc, rdata.strong,
3308                                      rdata.weak);
3309                         break;
3310                 }
3311                 case BC_INCREFS_DONE:
3312                 case BC_ACQUIRE_DONE: {
3313                         binder_uintptr_t node_ptr;
3314                         binder_uintptr_t cookie;
3315                         struct binder_node *node;
3316                         bool free_node;
3317
3318                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3319                                 return -EFAULT;
3320                         ptr += sizeof(binder_uintptr_t);
3321                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3322                                 return -EFAULT;
3323                         ptr += sizeof(binder_uintptr_t);
3324                         node = binder_get_node(proc, node_ptr);
3325                         if (node == NULL) {
3326                                 binder_user_error("%d:%d %s u%016llx no match\n",
3327                                         proc->pid, thread->pid,
3328                                         cmd == BC_INCREFS_DONE ?
3329                                         "BC_INCREFS_DONE" :
3330                                         "BC_ACQUIRE_DONE",
3331                                         (u64)node_ptr);
3332                                 break;
3333                         }
3334                         if (cookie != node->cookie) {
3335                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3336                                         proc->pid, thread->pid,
3337                                         cmd == BC_INCREFS_DONE ?
3338                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3339                                         (u64)node_ptr, node->debug_id,
3340                                         (u64)cookie, (u64)node->cookie);
3341                                 binder_put_node(node);
3342                                 break;
3343                         }
3344                         binder_node_inner_lock(node);
3345                         if (cmd == BC_ACQUIRE_DONE) {
3346                                 if (node->pending_strong_ref == 0) {
3347                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3348                                                 proc->pid, thread->pid,
3349                                                 node->debug_id);
3350                                         binder_node_inner_unlock(node);
3351                                         binder_put_node(node);
3352                                         break;
3353                                 }
3354                                 node->pending_strong_ref = 0;
3355                         } else {
3356                                 if (node->pending_weak_ref == 0) {
3357                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3358                                                 proc->pid, thread->pid,
3359                                                 node->debug_id);
3360                                         binder_node_inner_unlock(node);
3361                                         binder_put_node(node);
3362                                         break;
3363                                 }
3364                                 node->pending_weak_ref = 0;
3365                         }
3366                         free_node = binder_dec_node_nilocked(node,
3367                                         cmd == BC_ACQUIRE_DONE, 0);
3368                         WARN_ON(free_node);
3369                         binder_debug(BINDER_DEBUG_USER_REFS,
3370                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3371                                      proc->pid, thread->pid,
3372                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3373                                      node->debug_id, node->local_strong_refs,
3374                                      node->local_weak_refs, node->tmp_refs);
3375                         binder_node_inner_unlock(node);
3376                         binder_put_node(node);
3377                         break;
3378                 }
3379                 case BC_ATTEMPT_ACQUIRE:
3380                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3381                         return -EINVAL;
3382                 case BC_ACQUIRE_RESULT:
3383                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3384                         return -EINVAL;
3385
3386                 case BC_FREE_BUFFER: {
3387                         binder_uintptr_t data_ptr;
3388                         struct binder_buffer *buffer;
3389
3390                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3391                                 return -EFAULT;
3392                         ptr += sizeof(binder_uintptr_t);
3393
3394                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3395                                                               data_ptr);
3396                         if (buffer == NULL) {
3397                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3398                                         proc->pid, thread->pid, (u64)data_ptr);
3399                                 break;
3400                         }
3401                         if (!buffer->allow_user_free) {
3402                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3403                                         proc->pid, thread->pid, (u64)data_ptr);
3404                                 break;
3405                         }
3406                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3407                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3408                                      proc->pid, thread->pid, (u64)data_ptr,
3409                                      buffer->debug_id,
3410                                      buffer->transaction ? "active" : "finished");
3411
3412                         if (buffer->transaction) {
3413                                 buffer->transaction->buffer = NULL;
3414                                 buffer->transaction = NULL;
3415                         }
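                        /*
                         * Freeing an async transaction's buffer also
                         * unthrottles its node: move the next queued async
                         * work (if any) to proc->todo, otherwise clear
                         * has_async_transaction.
                         */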
3416                         if (buffer->async_transaction && buffer->target_node) {
3417                                 struct binder_node *buf_node;
3418                                 struct binder_work *w;
3419
3420                                 buf_node = buffer->target_node;
3421                                 binder_node_inner_lock(buf_node);
3422                                 BUG_ON(!buf_node->has_async_transaction);
3423                                 BUG_ON(buf_node->proc != proc);
3424                                 w = binder_dequeue_work_head_ilocked(
3425                                                 &buf_node->async_todo);
3426                                 if (!w) {
3427                                         buf_node->has_async_transaction = 0;
3428                                 } else {
3429                                         binder_enqueue_work_ilocked(
3430                                                         w, &proc->todo);
3431                                         binder_wakeup_proc_ilocked(proc);
3432                                 }
3433                                 binder_node_inner_unlock(buf_node);
3434                         }
3435                         trace_binder_transaction_buffer_release(buffer);
3436                         binder_transaction_buffer_release(proc, buffer, NULL);
3437                         binder_alloc_free_buf(&proc->alloc, buffer);
3438                         break;
3439                 }
3440
3441                 case BC_TRANSACTION_SG:
3442                 case BC_REPLY_SG: {
3443                         struct binder_transaction_data_sg tr;
3444
3445                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3446                                 return -EFAULT;
3447                         ptr += sizeof(tr);
3448                         binder_transaction(proc, thread, &tr.transaction_data,
3449                                            cmd == BC_REPLY_SG, tr.buffers_size);
3450                         break;
3451                 }
3452                 case BC_TRANSACTION:
3453                 case BC_REPLY: {
3454                         struct binder_transaction_data tr;
3455
3456                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3457                                 return -EFAULT;
3458                         ptr += sizeof(tr);
3459                         binder_transaction(proc, thread, &tr,
3460                                            cmd == BC_REPLY, 0);
3461                         break;
3462                 }
3463
3464                 case BC_REGISTER_LOOPER:
3465                         binder_debug(BINDER_DEBUG_THREADS,
3466                                      "%d:%d BC_REGISTER_LOOPER\n",
3467                                      proc->pid, thread->pid);
3468                         binder_inner_proc_lock(proc);
3469                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3470                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3471                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3472                                         proc->pid, thread->pid);
3473                         } else if (proc->requested_threads == 0) {
3474                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3475                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3476                                         proc->pid, thread->pid);
3477                         } else {
3478                                 proc->requested_threads--;
3479                                 proc->requested_threads_started++;
3480                         }
3481                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3482                         binder_inner_proc_unlock(proc);
3483                         break;
3484                 case BC_ENTER_LOOPER:
3485                         binder_debug(BINDER_DEBUG_THREADS,
3486                                      "%d:%d BC_ENTER_LOOPER\n",
3487                                      proc->pid, thread->pid);
3488                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3489                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3490                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3491                                         proc->pid, thread->pid);
3492                         }
3493                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3494                         break;
3495                 case BC_EXIT_LOOPER:
3496                         binder_debug(BINDER_DEBUG_THREADS,
3497                                      "%d:%d BC_EXIT_LOOPER\n",
3498                                      proc->pid, thread->pid);
3499                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3500                         break;
3501
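                /*
                 * Death notifications: BC_REQUEST_DEATH_NOTIFICATION
                 * registers a cookie against a ref. When the node's owner
                 * dies, BR_DEAD_BINDER delivers that cookie to userspace,
                 * which acknowledges with BC_DEAD_BINDER_DONE below.
                 */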
3502                 case BC_REQUEST_DEATH_NOTIFICATION:
3503                 case BC_CLEAR_DEATH_NOTIFICATION: {
3504                         uint32_t target;
3505                         binder_uintptr_t cookie;
3506                         struct binder_ref *ref;
3507                         struct binder_ref_death *death = NULL;
3508
3509                         if (get_user(target, (uint32_t __user *)ptr))
3510                                 return -EFAULT;
3511                         ptr += sizeof(uint32_t);
3512                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3513                                 return -EFAULT;
3514                         ptr += sizeof(binder_uintptr_t);
3515                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3516                                 /*
3517                                  * Allocate the death notification object
3518                                  * up front, before any locks are taken
3519                                  */
3520                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3521                                 if (death == NULL) {
3522                                         WARN_ON(thread->return_error.cmd !=
3523                                                 BR_OK);
3524                                         thread->return_error.cmd = BR_ERROR;
3525                                         binder_enqueue_work(
3526                                                 thread->proc,
3527                                                 &thread->return_error.work,
3528                                                 &thread->todo);
3529                                         binder_debug(
3530                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3531                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3532                                                 proc->pid, thread->pid);
3533                                         break;
3534                                 }
3535                         }
3536                         binder_proc_lock(proc);
3537                         ref = binder_get_ref_olocked(proc, target, false);
3538                         if (ref == NULL) {
3539                                 binder_user_error("%d:%d %s invalid ref %d\n",
3540                                         proc->pid, thread->pid,
3541                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3542                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3543                                         "BC_CLEAR_DEATH_NOTIFICATION",
3544                                         target);
3545                                 binder_proc_unlock(proc);
3546                                 kfree(death);
3547                                 break;
3548                         }
3549
3550                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3551                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3552                                      proc->pid, thread->pid,
3553                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3554                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3555                                      "BC_CLEAR_DEATH_NOTIFICATION",
3556                                      (u64)cookie, ref->data.debug_id,
3557                                      ref->data.desc, ref->data.strong,
3558                                      ref->data.weak, ref->node->debug_id);
3559
3560                         binder_node_lock(ref->node);
3561                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3562                                 if (ref->death) {
3563                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3564                                                 proc->pid, thread->pid);
3565                                         binder_node_unlock(ref->node);
3566                                         binder_proc_unlock(proc);
3567                                         kfree(death);
3568                                         break;
3569                                 }
3570                                 binder_stats_created(BINDER_STAT_DEATH);
3571                                 INIT_LIST_HEAD(&death->work.entry);
3572                                 death->cookie = cookie;
3573                                 ref->death = death;
3574                                 if (ref->node->proc == NULL) {
3575                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3576
3577                                         binder_inner_proc_lock(proc);
3578                                         binder_enqueue_work_ilocked(
3579                                                 &ref->death->work, &proc->todo);
3580                                         binder_wakeup_proc_ilocked(proc);
3581                                         binder_inner_proc_unlock(proc);
3582                                 }
3583                         } else {
3584                                 if (ref->death == NULL) {
3585                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3586                                                 proc->pid, thread->pid);
3587                                         binder_node_unlock(ref->node);
3588                                         binder_proc_unlock(proc);
3589                                         break;
3590                                 }
3591                                 death = ref->death;
3592                                 if (death->cookie != cookie) {
3593                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3594                                                 proc->pid, thread->pid,
3595                                                 (u64)death->cookie,
3596                                                 (u64)cookie);
3597                                         binder_node_unlock(ref->node);
3598                                         binder_proc_unlock(proc);
3599                                         break;
3600                                 }
3601                                 ref->death = NULL;
3602                                 binder_inner_proc_lock(proc);
3603                                 if (list_empty(&death->work.entry)) {
3604                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3605                                         if (thread->looper &
3606                                             (BINDER_LOOPER_STATE_REGISTERED |
3607                                              BINDER_LOOPER_STATE_ENTERED)) {
3608                                                 binder_enqueue_work_ilocked(
3609                                                                 &death->work,
3610                                                                 &thread->todo);
3611                                         } else {
3612                                                 binder_enqueue_work_ilocked(
3613                                                                 &death->work,
3614                                                                 &proc->todo);
3615                                                 binder_wakeup_proc_ilocked(
3616                                                                 proc);
3617                                         }
3618                                 } else {
3619                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3620                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3621                                 }
3622                                 binder_inner_proc_unlock(proc);
3623                         }
3624                         binder_node_unlock(ref->node);
3625                         binder_proc_unlock(proc);
3626                 } break;
3627                 case BC_DEAD_BINDER_DONE: {
3628                         struct binder_work *w;
3629                         binder_uintptr_t cookie;
3630                         struct binder_ref_death *death = NULL;
3631
3632                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3633                                 return -EFAULT;
3634
3635                         ptr += sizeof(cookie);
3636                         binder_inner_proc_lock(proc);
3637                         list_for_each_entry(w, &proc->delivered_death,
3638                                             entry) {
3639                                 struct binder_ref_death *tmp_death =
3640                                         container_of(w,
3641                                                      struct binder_ref_death,
3642                                                      work);
3643
3644                                 if (tmp_death->cookie == cookie) {
3645                                         death = tmp_death;
3646                                         break;
3647                                 }
3648                         }
3649                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3650                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3651                                      proc->pid, thread->pid, (u64)cookie,
3652                                      death);
3653                         if (death == NULL) {
3654                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3655                                         proc->pid, thread->pid, (u64)cookie);
3656                                 binder_inner_proc_unlock(proc);
3657                                 break;
3658                         }
3659                         binder_dequeue_work_ilocked(&death->work);
3660                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3661                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3662                                 if (thread->looper &
3663                                         (BINDER_LOOPER_STATE_REGISTERED |
3664                                          BINDER_LOOPER_STATE_ENTERED)) {
3665                                         binder_enqueue_work_ilocked(
3666                                                 &death->work, &thread->todo);
3667                                 } else {
3668                                         binder_enqueue_work_ilocked(
3669                                                         &death->work,
3670                                                         &proc->todo);
3671                                         binder_wakeup_proc_ilocked(proc);
3672                                 }
3673                         }
3674                         binder_inner_proc_unlock(proc);
3675                 } break;
3676
3677                 default:
3678                         pr_err("%d:%d unknown command %d\n",
3679                                proc->pid, thread->pid, cmd);
3680                         return -EINVAL;
3681                 }
3682                 *consumed = ptr - buffer;
3683         }
3684         return 0;
3685 }
3686
3687 static void binder_stat_br(struct binder_proc *proc,
3688                            struct binder_thread *thread, uint32_t cmd)
3689 {
3690         trace_binder_return(cmd);
3691         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3692                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3693                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3694                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3695         }
3696 }
3697
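/*
 * Write one node command to the read buffer as the triple
 * (u32 cmd, binder_uintptr_t ptr, binder_uintptr_t cookie),
 * advancing *ptrp past what was written.
 */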
3698 static int binder_put_node_cmd(struct binder_proc *proc,
3699                                struct binder_thread *thread,
3700                                void __user **ptrp,
3701                                binder_uintptr_t node_ptr,
3702                                binder_uintptr_t node_cookie,
3703                                int node_debug_id,
3704                                uint32_t cmd, const char *cmd_name)
3705 {
3706         void __user *ptr = *ptrp;
3707
3708         if (put_user(cmd, (uint32_t __user *)ptr))
3709                 return -EFAULT;
3710         ptr += sizeof(uint32_t);
3711
3712         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3713                 return -EFAULT;
3714         ptr += sizeof(binder_uintptr_t);
3715
3716         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3717                 return -EFAULT;
3718         ptr += sizeof(binder_uintptr_t);
3719
3720         binder_stat_br(proc, thread, cmd);
3721         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3722                      proc->pid, thread->pid, cmd_name, node_debug_id,
3723                      (u64)node_ptr, (u64)node_cookie);
3724
3725         *ptrp = ptr;
3726         return 0;
3727 }
3728
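/*
 * Sleep until this thread has work, or, if do_proc_work, until the
 * process has work. While eligible for process work the thread parks
 * on proc->waiting_threads; freezer_do_not_count()/freezer_count()
 * bracket the sleep so the freezer need not wait for a blocked task.
 */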
3729 static int binder_wait_for_work(struct binder_thread *thread,
3730                                 bool do_proc_work)
3731 {
3732         DEFINE_WAIT(wait);
3733         struct binder_proc *proc = thread->proc;
3734         int ret = 0;
3735
3736         freezer_do_not_count();
3737         binder_inner_proc_lock(proc);
3738         for (;;) {
3739                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3740                 if (binder_has_work_ilocked(thread, do_proc_work))
3741                         break;
3742                 if (do_proc_work)
3743                         list_add(&thread->waiting_thread_node,
3744                                  &proc->waiting_threads);
3745                 binder_inner_proc_unlock(proc);
3746                 schedule();
3747                 binder_inner_proc_lock(proc);
3748                 list_del_init(&thread->waiting_thread_node);
3749                 if (signal_pending(current)) {
3750                         ret = -ERESTARTSYS;
3751                         break;
3752                 }
3753         }
3754         finish_wait(&thread->wait, &wait);
3755         binder_inner_proc_unlock(proc);
3756         freezer_count();
3757
3758         return ret;
3759 }
3760
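/**
 * binder_thread_read() - drain work into the user-space read buffer
 * @proc:          process doing the read
 * @thread:        thread doing the read
 * @binder_buffer: user address of the read buffer
 * @size:          size of the read buffer
 * @consumed:      bytes already consumed; updated with bytes written
 * @non_block:     if true, return -EAGAIN instead of blocking
 *
 * Translates queued binder_work items from the thread (and, when the
 * thread is available for it, the process) todo lists into BR_*
 * commands in the read buffer. May append a BR_SPAWN_LOOPER request
 * if the process is running low on looper threads.
 */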
3761 static int binder_thread_read(struct binder_proc *proc,
3762                               struct binder_thread *thread,
3763                               binder_uintptr_t binder_buffer, size_t size,
3764                               binder_size_t *consumed, int non_block)
3765 {
3766         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3767         void __user *ptr = buffer + *consumed;
3768         void __user *end = buffer + size;
3769
3770         int ret = 0;
3771         int wait_for_proc_work;
3772
3773         if (*consumed == 0) {
3774                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3775                         return -EFAULT;
3776                 ptr += sizeof(uint32_t);
3777         }
3778
3779 retry:
3780         binder_inner_proc_lock(proc);
3781         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3782         binder_inner_proc_unlock(proc);
3783
3784         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3785
3786         trace_binder_wait_for_work(wait_for_proc_work,
3787                                    !!thread->transaction_stack,
3788                                    !binder_worklist_empty(proc, &thread->todo));
3789         if (wait_for_proc_work) {
3790                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3791                                         BINDER_LOOPER_STATE_ENTERED))) {
3792                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3793                                 proc->pid, thread->pid, thread->looper);
3794                         wait_event_interruptible(binder_user_error_wait,
3795                                                  binder_stop_on_user_error < 2);
3796                 }
3797                 binder_set_nice(proc->default_priority);
3798         }
3799
3800         if (non_block) {
3801                 if (!binder_has_work(thread, wait_for_proc_work))
3802                         ret = -EAGAIN;
3803         } else {
3804                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3805         }
3806
3807         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3808
3809         if (ret)
3810                 return ret;
3811
3812         while (1) {
3813                 uint32_t cmd;
3814                 struct binder_transaction_data tr;
3815                 struct binder_work *w = NULL;
3816                 struct list_head *list = NULL;
3817                 struct binder_transaction *t = NULL;
3818                 struct binder_thread *t_from;
3819
3820                 binder_inner_proc_lock(proc);
3821                 if (!binder_worklist_empty_ilocked(&thread->todo))
3822                         list = &thread->todo;
3823                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3824                            wait_for_proc_work)
3825                         list = &proc->todo;
3826                 else {
3827                         binder_inner_proc_unlock(proc);
3828
3829                         /* no data added */
3830                         if (ptr - buffer == 4 && !thread->looper_need_return)
3831                                 goto retry;
3832                         break;
3833                 }
3834
3835                 if (end - ptr < sizeof(tr) + 4) {
3836                         binder_inner_proc_unlock(proc);
3837                         break;
3838                 }
3839                 w = binder_dequeue_work_head_ilocked(list);
3840
3841                 switch (w->type) {
3842                 case BINDER_WORK_TRANSACTION: {
3843                         binder_inner_proc_unlock(proc);
3844                         t = container_of(w, struct binder_transaction, work);
3845                 } break;
3846                 case BINDER_WORK_RETURN_ERROR: {
3847                         struct binder_error *e = container_of(
3848                                         w, struct binder_error, work);
3849
3850                         WARN_ON(e->cmd == BR_OK);
3851                         binder_inner_proc_unlock(proc);
3852                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3853                                 return -EFAULT;
                             /*
                              * Save the command before it is reset so the
                              * stats count what was actually delivered,
                              * not BR_OK.
                              */
                             cmd = e->cmd;
3854                         e->cmd = BR_OK;
3855                         ptr += sizeof(uint32_t);
3856
3857                         binder_stat_br(proc, thread, cmd);
3858                 } break;
3859                 case BINDER_WORK_TRANSACTION_COMPLETE: {
3860                         binder_inner_proc_unlock(proc);
3861                         cmd = BR_TRANSACTION_COMPLETE;
3862                         if (put_user(cmd, (uint32_t __user *)ptr))
3863                                 return -EFAULT;
3864                         ptr += sizeof(uint32_t);
3865
3866                         binder_stat_br(proc, thread, cmd);
3867                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3868                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3869                                      proc->pid, thread->pid);
3870                         kfree(w);
3871                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3872                 } break;
3873                 case BINDER_WORK_NODE: {
3874                         struct binder_node *node = container_of(w, struct binder_node, work);
3875                         int strong, weak;
3876                         binder_uintptr_t node_ptr = node->ptr;
3877                         binder_uintptr_t node_cookie = node->cookie;
3878                         int node_debug_id = node->debug_id;
3879                         int has_weak_ref;
3880                         int has_strong_ref;
3881                         void __user *orig_ptr = ptr;
3882
3883                         BUG_ON(proc != node->proc);
3884                         strong = node->internal_strong_refs ||
3885                                         node->local_strong_refs;
3886                         weak = !hlist_empty(&node->refs) ||
3887                                         node->local_weak_refs ||
3888                                         node->tmp_refs || strong;
3889                         has_strong_ref = node->has_strong_ref;
3890                         has_weak_ref = node->has_weak_ref;
3891
3892                         if (weak && !has_weak_ref) {
3893                                 node->has_weak_ref = 1;
3894                                 node->pending_weak_ref = 1;
3895                                 node->local_weak_refs++;
3896                         }
3897                         if (strong && !has_strong_ref) {
3898                                 node->has_strong_ref = 1;
3899                                 node->pending_strong_ref = 1;
3900                                 node->local_strong_refs++;
3901                         }
3902                         if (!strong && has_strong_ref)
3903                                 node->has_strong_ref = 0;
3904                         if (!weak && has_weak_ref)
3905                                 node->has_weak_ref = 0;
3906                         if (!weak && !strong) {
3907                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3908                                              "%d:%d node %d u%016llx c%016llx deleted\n",
3909                                              proc->pid, thread->pid,
3910                                              node_debug_id,
3911                                              (u64)node_ptr,
3912                                              (u64)node_cookie);
3913                                 rb_erase(&node->rb_node, &proc->nodes);
3914                                 binder_inner_proc_unlock(proc);
3915                                 binder_node_lock(node);
3916                                 /*
3917                                  * Acquire the node lock before freeing the
3918                                  * node to serialize with other threads that
3919                                  * may have been holding the node lock while
3920                                  * decrementing this node (avoids race where
3921                                  * this thread frees while the other thread
3922                                  * is unlocking the node after the final
3923                                  * decrement)
3924                                  */
3925                                 binder_node_unlock(node);
3926                                 binder_free_node(node);
3927                         } else
3928                                 binder_inner_proc_unlock(proc);
3929
3930                         if (weak && !has_weak_ref)
3931                                 ret = binder_put_node_cmd(
3932                                                 proc, thread, &ptr, node_ptr,
3933                                                 node_cookie, node_debug_id,
3934                                                 BR_INCREFS, "BR_INCREFS");
3935                         if (!ret && strong && !has_strong_ref)
3936                                 ret = binder_put_node_cmd(
3937                                                 proc, thread, &ptr, node_ptr,
3938                                                 node_cookie, node_debug_id,
3939                                                 BR_ACQUIRE, "BR_ACQUIRE");
3940                         if (!ret && !strong && has_strong_ref)
3941                                 ret = binder_put_node_cmd(
3942                                                 proc, thread, &ptr, node_ptr,
3943                                                 node_cookie, node_debug_id,
3944                                                 BR_RELEASE, "BR_RELEASE");
3945                         if (!ret && !weak && has_weak_ref)
3946                                 ret = binder_put_node_cmd(
3947                                                 proc, thread, &ptr, node_ptr,
3948                                                 node_cookie, node_debug_id,
3949                                                 BR_DECREFS, "BR_DECREFS");
3950                         if (orig_ptr == ptr)
3951                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3952                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
3953                                              proc->pid, thread->pid,
3954                                              node_debug_id,
3955                                              (u64)node_ptr,
3956                                              (u64)node_cookie);
3957                         if (ret)
3958                                 return ret;
3959                 } break;
3960                 case BINDER_WORK_DEAD_BINDER:
3961                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3962                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3963                         struct binder_ref_death *death;
3964                         uint32_t cmd;
3965                         binder_uintptr_t cookie;
3966
3967                         death = container_of(w, struct binder_ref_death, work);
3968                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3969                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3970                         else
3971                                 cmd = BR_DEAD_BINDER;
3972                         cookie = death->cookie;
3973
3974                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3975                                      "%d:%d %s %016llx\n",
3976                                       proc->pid, thread->pid,
3977                                       cmd == BR_DEAD_BINDER ?
3978                                       "BR_DEAD_BINDER" :
3979                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3980                                       (u64)cookie);
3981                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3982                                 binder_inner_proc_unlock(proc);
3983                                 kfree(death);
3984                                 binder_stats_deleted(BINDER_STAT_DEATH);
3985                         } else {
3986                                 binder_enqueue_work_ilocked(
3987                                                 w, &proc->delivered_death);
3988                                 binder_inner_proc_unlock(proc);
3989                         }
3990                         if (put_user(cmd, (uint32_t __user *)ptr))
3991                                 return -EFAULT;
3992                         ptr += sizeof(uint32_t);
3993                         if (put_user(cookie,
3994                                      (binder_uintptr_t __user *)ptr))
3995                                 return -EFAULT;
3996                         ptr += sizeof(binder_uintptr_t);
3997                         binder_stat_br(proc, thread, cmd);
3998                         if (cmd == BR_DEAD_BINDER)
3999                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4000                 } break;
4001                 }
4002
4003                 if (!t)
4004                         continue;
4005
4006                 BUG_ON(t->buffer == NULL);
4007                 if (t->buffer->target_node) {
4008                         struct binder_node *target_node = t->buffer->target_node;
4009
4010                         tr.target.ptr = target_node->ptr;
4011                         tr.cookie = target_node->cookie;
4012                         t->saved_priority = task_nice(current);
4013                         if (t->priority < target_node->min_priority &&
4014                             !(t->flags & TF_ONE_WAY))
4015                                 binder_set_nice(t->priority);
4016                         else if (!(t->flags & TF_ONE_WAY) ||
4017                                  t->saved_priority > target_node->min_priority)
4018                                 binder_set_nice(target_node->min_priority);
4019                         cmd = BR_TRANSACTION;
4020                 } else {
4021                         tr.target.ptr = 0;
4022                         tr.cookie = 0;
4023                         cmd = BR_REPLY;
4024                 }
4025                 tr.code = t->code;
4026                 tr.flags = t->flags;
4027                 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4028
4029                 t_from = binder_get_txn_from(t);
4030                 if (t_from) {
4031                         struct task_struct *sender = t_from->proc->tsk;
4032
4033                         tr.sender_pid = task_tgid_nr_ns(sender,
4034                                                         task_active_pid_ns(current));
4035                 } else {
4036                         tr.sender_pid = 0;
4037                 }
4038
4039                 tr.data_size = t->buffer->data_size;
4040                 tr.offsets_size = t->buffer->offsets_size;
4041                 tr.data.ptr.buffer = (binder_uintptr_t)
4042                         ((uintptr_t)t->buffer->data +
4043                         binder_alloc_get_user_buffer_offset(&proc->alloc));
4044                 tr.data.ptr.offsets = tr.data.ptr.buffer +
4045                                         ALIGN(t->buffer->data_size,
4046                                             sizeof(void *));
4047
4048                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4049                         if (t_from)
4050                                 binder_thread_dec_tmpref(t_from);
4051
4052                         binder_cleanup_transaction(t, "put_user failed",
4053                                                    BR_FAILED_REPLY);
4054
4055                         return -EFAULT;
4056                 }
4057                 ptr += sizeof(uint32_t);
4058                 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4059                         if (t_from)
4060                                 binder_thread_dec_tmpref(t_from);
4061
4062                         binder_cleanup_transaction(t, "copy_to_user failed",
4063                                                    BR_FAILED_REPLY);
4064
4065                         return -EFAULT;
4066                 }
4067                 ptr += sizeof(tr);
4068
4069                 trace_binder_transaction_received(t);
4070                 binder_stat_br(proc, thread, cmd);
4071                 binder_debug(BINDER_DEBUG_TRANSACTION,
4072                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4073                              proc->pid, thread->pid,
4074                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4075                              "BR_REPLY",
4076                              t->debug_id, t_from ? t_from->proc->pid : 0,
4077                              t_from ? t_from->pid : 0, cmd,
4078                              t->buffer->data_size, t->buffer->offsets_size,
4079                              (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4080
4081                 if (t_from)
4082                         binder_thread_dec_tmpref(t_from);
4083                 t->buffer->allow_user_free = 1;
4084                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4085                         binder_inner_proc_lock(thread->proc);
4086                         t->to_parent = thread->transaction_stack;
4087                         t->to_thread = thread;
4088                         thread->transaction_stack = t;
4089                         binder_inner_proc_unlock(thread->proc);
4090                 } else {
4091                         binder_free_transaction(t);
4092                 }
4093                 break;
4094         }
4095
4096 done:
4097
4098         *consumed = ptr - buffer;
4099         binder_inner_proc_lock(proc);
4100         if (proc->requested_threads == 0 &&
4101             list_empty(&thread->proc->waiting_threads) &&
4102             proc->requested_threads_started < proc->max_threads &&
4103             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4104              BINDER_LOOPER_STATE_ENTERED))
4105              /* the user-space code fails to spawn a new thread if we leave this out */) {
4106                 proc->requested_threads++;
4107                 binder_inner_proc_unlock(proc);
4108                 binder_debug(BINDER_DEBUG_THREADS,
4109                              "%d:%d BR_SPAWN_LOOPER\n",
4110                              proc->pid, thread->pid);
4111                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4112                         return -EFAULT;
4113                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4114         } else
4115                 binder_inner_proc_unlock(proc);
4116         return 0;
4117 }
4118
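/**
 * binder_release_work() - flush undelivered work from a todo list
 * @proc: process the list belongs to
 * @list: todo list to drain
 *
 * Dequeues every pending work item, failing in-flight transactions
 * with BR_DEAD_REPLY and freeing the associated bookkeeping. Used
 * when a thread or process goes away.
 */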
4119 static void binder_release_work(struct binder_proc *proc,
4120                                 struct list_head *list)
4121 {
4122         struct binder_work *w;
4123
4124         while (1) {
4125                 w = binder_dequeue_work_head(proc, list);
4126                 if (!w)
4127                         return;
4128
4129                 switch (w->type) {
4130                 case BINDER_WORK_TRANSACTION: {
4131                         struct binder_transaction *t;
4132
4133                         t = container_of(w, struct binder_transaction, work);
4134
4135                         binder_cleanup_transaction(t, "process died.",
4136                                                    BR_DEAD_REPLY);
4137                 } break;
4138                 case BINDER_WORK_RETURN_ERROR: {
4139                         struct binder_error *e = container_of(
4140                                         w, struct binder_error, work);
4141
4142                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4143                                 "undelivered TRANSACTION_ERROR: %u\n",
4144                                 e->cmd);
4145                 } break;
4146                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4147                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4148                                 "undelivered TRANSACTION_COMPLETE\n");
4149                         kfree(w);
4150                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4151                 } break;
4152                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4153                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4154                         struct binder_ref_death *death;
4155
4156                         death = container_of(w, struct binder_ref_death, work);
4157                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4158                                 "undelivered death notification, %016llx\n",
4159                                 (u64)death->cookie);
4160                         kfree(death);
4161                         binder_stats_deleted(BINDER_STAT_DEATH);
4162                 } break;
4163                 default:
4164                         pr_err("unexpected work type, %d, not freed\n",
4165                                w->type);
4166                         break;
4167                 }
4168         }
4170 }
4171
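/**
 * binder_get_thread_ilocked() - look up the current task's thread
 * @proc:       process to search
 * @new_thread: preallocated thread to insert if none is found, or NULL
 *
 * Searches proc->threads for the calling task. If it is missing and
 * @new_thread is non-NULL, initializes and inserts @new_thread.
 * Caller must hold proc->inner_lock.
 */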
4172 static struct binder_thread *binder_get_thread_ilocked(
4173                 struct binder_proc *proc, struct binder_thread *new_thread)
4174 {
4175         struct binder_thread *thread = NULL;
4176         struct rb_node *parent = NULL;
4177         struct rb_node **p = &proc->threads.rb_node;
4178
4179         while (*p) {
4180                 parent = *p;
4181                 thread = rb_entry(parent, struct binder_thread, rb_node);
4182
4183                 if (current->pid < thread->pid)
4184                         p = &(*p)->rb_left;
4185                 else if (current->pid > thread->pid)
4186                         p = &(*p)->rb_right;
4187                 else
4188                         return thread;
4189         }
4190         if (!new_thread)
4191                 return NULL;
4192         thread = new_thread;
4193         binder_stats_created(BINDER_STAT_THREAD);
4194         thread->proc = proc;
4195         thread->pid = current->pid;
4196         atomic_set(&thread->tmp_ref, 0);
4197         init_waitqueue_head(&thread->wait);
4198         INIT_LIST_HEAD(&thread->todo);
4199         rb_link_node(&thread->rb_node, parent, p);
4200         rb_insert_color(&thread->rb_node, &proc->threads);
4201         thread->looper_need_return = true;
4202         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4203         thread->return_error.cmd = BR_OK;
4204         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4205         thread->reply_error.cmd = BR_OK;
4206         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4207         return thread;
4208 }
4209
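/**
 * binder_get_thread() - get or create the current task's binder thread
 * @proc: process the thread belongs to
 *
 * Return: the existing thread, a newly allocated one on first use, or
 * NULL if the allocation fails.
 */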
4210 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4211 {
4212         struct binder_thread *thread;
4213         struct binder_thread *new_thread;
4214
4215         binder_inner_proc_lock(proc);
4216         thread = binder_get_thread_ilocked(proc, NULL);
4217         binder_inner_proc_unlock(proc);
4218         if (!thread) {
4219                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4220                 if (new_thread == NULL)
4221                         return NULL;
4222                 binder_inner_proc_lock(proc);
4223                 thread = binder_get_thread_ilocked(proc, new_thread);
4224                 binder_inner_proc_unlock(proc);
4225                 if (thread != new_thread)
4226                         kfree(new_thread);
4227         }
4228         return thread;
4229 }
4230
4231 static void binder_free_proc(struct binder_proc *proc)
4232 {
4233         BUG_ON(!list_empty(&proc->todo));
4234         BUG_ON(!list_empty(&proc->delivered_death));
4235         binder_alloc_deferred_release(&proc->alloc);
4236         put_task_struct(proc->tsk);
4237         binder_stats_deleted(BINDER_STAT_PROC);
4238         kfree(proc);
4239 }
4240
4241 static void binder_free_thread(struct binder_thread *thread)
4242 {
4243         BUG_ON(!list_empty(&thread->todo));
4244         binder_stats_deleted(BINDER_STAT_THREAD);
4245         binder_proc_dec_tmpref(thread->proc);
4246         kfree(thread);
4247 }
4248
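/**
 * binder_thread_release() - tear down a binder thread
 * @proc:   process the thread belongs to
 * @thread: thread to release
 *
 * Removes the thread from proc->threads, unhooks it from any
 * transactions still referencing it (sending BR_DEAD_REPLY for a
 * pending reply), flushes its todo list and drops the temporary
 * reference that kept it alive during release.
 *
 * Return: number of transactions that were still active.
 */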
4249 static int binder_thread_release(struct binder_proc *proc,
4250                                  struct binder_thread *thread)
4251 {
4252         struct binder_transaction *t;
4253         struct binder_transaction *send_reply = NULL;
4254         int active_transactions = 0;
4255         struct binder_transaction *last_t = NULL;
4256
4257         binder_inner_proc_lock(thread->proc);
4258         /*
4259          * take a ref on the proc so it survives
4260          * after we remove this thread from proc->threads.
4261          * The corresponding dec is when we actually
4262          * free the thread in binder_free_thread()
4263          */
4264         proc->tmp_ref++;
4265         /*
4266          * take a ref on this thread to ensure it
4267          * survives while we are releasing it
4268          */
4269         atomic_inc(&thread->tmp_ref);
4270         rb_erase(&thread->rb_node, &proc->threads);
4271         t = thread->transaction_stack;
4272         if (t) {
4273                 spin_lock(&t->lock);
4274                 if (t->to_thread == thread)
4275                         send_reply = t;
4276         }
4277         thread->is_dead = true;
4278
4279         while (t) {
4280                 last_t = t;
4281                 active_transactions++;
4282                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4283                              "release %d:%d transaction %d %s, still active\n",
4284                               proc->pid, thread->pid,
4285                              t->debug_id,
4286                              (t->to_thread == thread) ? "in" : "out");
4287
4288                 if (t->to_thread == thread) {
4289                         t->to_proc = NULL;
4290                         t->to_thread = NULL;
4291                         if (t->buffer) {
4292                                 t->buffer->transaction = NULL;
4293                                 t->buffer = NULL;
4294                         }
4295                         t = t->to_parent;
4296                 } else if (t->from == thread) {
4297                         t->from = NULL;
4298                         t = t->from_parent;
4299                 } else
4300                         BUG();
4301                 spin_unlock(&last_t->lock);
4302                 if (t)
4303                         spin_lock(&t->lock);
4304         }
4305         binder_inner_proc_unlock(thread->proc);
4306
4307         if (send_reply)
4308                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4309         binder_release_work(proc, &thread->todo);
4310         binder_thread_dec_tmpref(thread);
4311         return active_transactions;
4312 }
4313
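/**
 * binder_poll() - poll() handler for the binder fd
 * @filp: file for the binder proc
 * @wait: poll table to register the wait queue with
 *
 * Reports POLLIN when the calling thread (or its process, if the
 * thread is available for process work) has work queued.
 */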
4314 static unsigned int binder_poll(struct file *filp,
4315                                 struct poll_table_struct *wait)
4316 {
4317         struct binder_proc *proc = filp->private_data;
4318         struct binder_thread *thread = NULL;
4319         bool wait_for_proc_work;
4320
4321         thread = binder_get_thread(proc);
             if (!thread)
                     return POLLERR;
4322
4323         binder_inner_proc_lock(thread->proc);
4324         thread->looper |= BINDER_LOOPER_STATE_POLL;
4325         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4326
4327         binder_inner_proc_unlock(thread->proc);
4328
4329         poll_wait(filp, &thread->wait, wait);
4330
4331         if (binder_has_work(thread, wait_for_proc_work))
4332                 return POLLIN;
4333
4334         return 0;
4335 }
4336
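/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:   file for the binder proc
 * @cmd:    ioctl command, used to validate the argument size
 * @arg:    user pointer to a struct binder_write_read
 * @thread: calling binder thread
 *
 * Copies in the binder_write_read descriptor, processes the write
 * buffer, then fills the read buffer, and copies the updated consumed
 * counts back to user space.
 */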
4337 static int binder_ioctl_write_read(struct file *filp,
4338                                 unsigned int cmd, unsigned long arg,
4339                                 struct binder_thread *thread)
4340 {
4341         int ret = 0;
4342         struct binder_proc *proc = filp->private_data;
4343         unsigned int size = _IOC_SIZE(cmd);
4344         void __user *ubuf = (void __user *)arg;
4345         struct binder_write_read bwr;
4346
4347         if (size != sizeof(struct binder_write_read)) {
4348                 ret = -EINVAL;
4349                 goto out;
4350         }
4351         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4352                 ret = -EFAULT;
4353                 goto out;
4354         }
4355         binder_debug(BINDER_DEBUG_READ_WRITE,
4356                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4357                      proc->pid, thread->pid,
4358                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4359                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4360
4361         if (bwr.write_size > 0) {
4362                 ret = binder_thread_write(proc, thread,
4363                                           bwr.write_buffer,
4364                                           bwr.write_size,
4365                                           &bwr.write_consumed);
4366                 trace_binder_write_done(ret);
4367                 if (ret < 0) {
4368                         bwr.read_consumed = 0;
4369                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4370                                 ret = -EFAULT;
4371                         goto out;
4372                 }
4373         }
4374         if (bwr.read_size > 0) {
4375                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4376                                          bwr.read_size,
4377                                          &bwr.read_consumed,
4378                                          filp->f_flags & O_NONBLOCK);
4379                 trace_binder_read_done(ret);
4380                 binder_inner_proc_lock(proc);
4381                 if (!binder_worklist_empty_ilocked(&proc->todo))
4382                         binder_wakeup_proc_ilocked(proc);
4383                 binder_inner_proc_unlock(proc);
4384                 if (ret < 0) {
4385                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4386                                 ret = -EFAULT;
4387                         goto out;
4388                 }
4389         }
4390         binder_debug(BINDER_DEBUG_READ_WRITE,
4391                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4392                      proc->pid, thread->pid,
4393                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4394                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4395         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4396                 ret = -EFAULT;
4397                 goto out;
4398         }
4399 out:
4400         return ret;
4401 }
4402
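/**
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR
 * @filp: file for the binder proc
 *
 * After the security and euid checks, creates the context manager
 * node for this binder context.
 *
 * Return: 0 on success, -EBUSY if a context manager already exists,
 * -EPERM if the checks fail, or -ENOMEM if node creation fails.
 */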
4403 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4404 {
4405         int ret = 0;
4406         struct binder_proc *proc = filp->private_data;
4407         struct binder_context *context = proc->context;
4408         struct binder_node *new_node;
4409         kuid_t curr_euid = current_euid();
4410
4411         mutex_lock(&context->context_mgr_node_lock);
4412         if (context->binder_context_mgr_node) {
4413                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4414                 ret = -EBUSY;
4415                 goto out;
4416         }
4417         ret = security_binder_set_context_mgr(proc->tsk);
4418         if (ret < 0)
4419                 goto out;
4420         if (uid_valid(context->binder_context_mgr_uid)) {
4421                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4422                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4423                                from_kuid(&init_user_ns, curr_euid),
4424                                from_kuid(&init_user_ns,
4425                                          context->binder_context_mgr_uid));
4426                         ret = -EPERM;
4427                         goto out;
4428                 }
4429         } else {
4430                 context->binder_context_mgr_uid = curr_euid;
4431         }
4432         new_node = binder_new_node(proc, NULL);
4433         if (!new_node) {
4434                 ret = -ENOMEM;
4435                 goto out;
4436         }
4437         binder_node_lock(new_node);
4438         new_node->local_weak_refs++;
4439         new_node->local_strong_refs++;
4440         new_node->has_strong_ref = 1;
4441         new_node->has_weak_ref = 1;
4442         context->binder_context_mgr_node = new_node;
4443         binder_node_unlock(new_node);
4444         binder_put_node(new_node);
4445 out:
4446         mutex_unlock(&context->context_mgr_node_lock);
4447         return ret;
4448 }
4449
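/**
 * binder_ioctl_get_node_debug_info() - handle BINDER_GET_NODE_DEBUG_INFO
 * @proc: process to inspect
 * @info: in/out debug info; info->ptr selects where to resume
 *
 * Fills @info with the state of the first node whose ptr is strictly
 * greater than info->ptr, letting user space iterate over all nodes.
 */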
4450 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4451                                 struct binder_node_debug_info *info)
4452 {
4453         struct rb_node *n;
4454         binder_uintptr_t ptr = info->ptr;
4455
4456         memset(info, 0, sizeof(*info));
4457
4458         binder_inner_proc_lock(proc);
4459         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4460                 struct binder_node *node = rb_entry(n, struct binder_node,
4461                                                     rb_node);
4462                 if (node->ptr > ptr) {
4463                         info->ptr = node->ptr;
4464                         info->cookie = node->cookie;
4465                         info->has_strong_ref = node->has_strong_ref;
4466                         info->has_weak_ref = node->has_weak_ref;
4467                         break;
4468                 }
4469         }
4470         binder_inner_proc_unlock(proc);
4471
4472         return 0;
4473 }
4474
4475 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4476 {
4477         int ret;
4478         struct binder_proc *proc = filp->private_data;
4479         struct binder_thread *thread;
4480         unsigned int size = _IOC_SIZE(cmd);
4481         void __user *ubuf = (void __user *)arg;
4482
4483         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4484                         proc->pid, current->pid, cmd, arg);*/
4485
4486         binder_selftest_alloc(&proc->alloc);
4487
4488         trace_binder_ioctl(cmd, arg);
4489
4490         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4491         if (ret)
4492                 goto err_unlocked;
4493
4494         thread = binder_get_thread(proc);
4495         if (thread == NULL) {
4496                 ret = -ENOMEM;
4497                 goto err;
4498         }
4499
4500         switch (cmd) {
4501         case BINDER_WRITE_READ:
4502                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4503                 if (ret)
4504                         goto err;
4505                 break;
4506         case BINDER_SET_MAX_THREADS: {
4507                 int max_threads;
4508
4509                 if (copy_from_user(&max_threads, ubuf,
4510                                    sizeof(max_threads))) {
4511                         ret = -EINVAL;
4512                         goto err;
4513                 }
4514                 binder_inner_proc_lock(proc);
4515                 proc->max_threads = max_threads;
4516                 binder_inner_proc_unlock(proc);
4517                 break;
4518         }
4519         case BINDER_SET_CONTEXT_MGR:
4520                 ret = binder_ioctl_set_ctx_mgr(filp);
4521                 if (ret)
4522                         goto err;
4523                 break;
4524         case BINDER_THREAD_EXIT:
4525                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4526                              proc->pid, thread->pid);
4527                 binder_thread_release(proc, thread);
4528                 thread = NULL;
4529                 break;
4530         case BINDER_VERSION: {
4531                 struct binder_version __user *ver = ubuf;
4532
4533                 if (size != sizeof(struct binder_version)) {
4534                         ret = -EINVAL;
4535                         goto err;
4536                 }
4537                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4538                              &ver->protocol_version)) {
4539                         ret = -EINVAL;
4540                         goto err;
4541                 }
4542                 break;
4543         }
4544         case BINDER_GET_NODE_DEBUG_INFO: {
4545                 struct binder_node_debug_info info;
4546
4547                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4548                         ret = -EFAULT;
4549                         goto err;
4550                 }
4551
4552                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4553                 if (ret < 0)
4554                         goto err;
4555
4556                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4557                         ret = -EFAULT;
4558                         goto err;
4559                 }
4560                 break;
4561         }
4562         default:
4563                 ret = -EINVAL;
4564                 goto err;
4565         }
4566         ret = 0;
4567 err:
4568         if (thread)
4569                 thread->looper_need_return = false;
4570         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4571         if (ret && ret != -ERESTARTSYS)
4572                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4573 err_unlocked:
4574         trace_binder_ioctl_done(ret);
4575         return ret;
4576 }
4577
4578 static void binder_vma_open(struct vm_area_struct *vma)
4579 {
4580         struct binder_proc *proc = vma->vm_private_data;
4581
4582         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4583                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4584                      proc->pid, vma->vm_start, vma->vm_end,
4585                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4586                      (unsigned long)pgprot_val(vma->vm_page_prot));
4587 }
4588
4589 static void binder_vma_close(struct vm_area_struct *vma)
4590 {
4591         struct binder_proc *proc = vma->vm_private_data;
4592
4593         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4594                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4595                      proc->pid, vma->vm_start, vma->vm_end,
4596                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4597                      (unsigned long)pgprot_val(vma->vm_page_prot));
4598         binder_alloc_vma_close(&proc->alloc);
4599         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4600 }
4601
4602 static int binder_vm_fault(struct vm_fault *vmf)
4603 {
4604         return VM_FAULT_SIGBUS;
4605 }
4606
4607 static const struct vm_operations_struct binder_vm_ops = {
4608         .open = binder_vma_open,
4609         .close = binder_vma_close,
4610         .fault = binder_vm_fault,
4611 };
4612
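/**
 * binder_mmap() - map the binder buffer space into user space
 * @filp: file for the binder proc
 * @vma:  vma describing the requested mapping
 *
 * Maps at most 4MB of buffer space. Mappings with forbidden flags are
 * rejected; the vma is marked VM_DONTCOPY and loses VM_MAYWRITE
 * before being handed to the binder allocator.
 */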
4613 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4614 {
4615         int ret;
4616         struct binder_proc *proc = filp->private_data;
4617         const char *failure_string;
4618
4619         if (proc->tsk != current->group_leader)
4620                 return -EINVAL;
4621
4622         if ((vma->vm_end - vma->vm_start) > SZ_4M)
4623                 vma->vm_end = vma->vm_start + SZ_4M;
4624
4625         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4626                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4627                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4628                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4629                      (unsigned long)pgprot_val(vma->vm_page_prot));
4630
4631         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4632                 ret = -EPERM;
4633                 failure_string = "bad vm_flags";
4634                 goto err_bad_arg;
4635         }
4636         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4637         vma->vm_ops = &binder_vm_ops;
4638         vma->vm_private_data = proc;
4639
4640         ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4641         if (ret)
4642                 return ret;
4643         mutex_lock(&proc->files_lock);
4644         proc->files = get_files_struct(current);
4645         mutex_unlock(&proc->files_lock);
4646         return 0;
4647
4648 err_bad_arg:
4649         pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4650                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4651         return ret;
4652 }
4653
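/**
 * binder_open() - set up a binder_proc for the opening process
 * @nodp: inode of the binder device
 * @filp: newly opened file
 *
 * Allocates and initializes the binder_proc (locks, todo list,
 * allocator), links it into binder_procs and creates its debugfs
 * entry.
 */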
4654 static int binder_open(struct inode *nodp, struct file *filp)
4655 {
4656         struct binder_proc *proc;
4657         struct binder_device *binder_dev;
4658
4659         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4660                      current->group_leader->pid, current->pid);
4661
4662         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4663         if (proc == NULL)
4664                 return -ENOMEM;
4665         spin_lock_init(&proc->inner_lock);
4666         spin_lock_init(&proc->outer_lock);
4667         get_task_struct(current->group_leader);
4668         proc->tsk = current->group_leader;
4669         mutex_init(&proc->files_lock);
4670         INIT_LIST_HEAD(&proc->todo);
4671         proc->default_priority = task_nice(current);
4672         binder_dev = container_of(filp->private_data, struct binder_device,
4673                                   miscdev);
4674         proc->context = &binder_dev->context;
4675         binder_alloc_init(&proc->alloc);
4676
4677         binder_stats_created(BINDER_STAT_PROC);
4678         proc->pid = current->group_leader->pid;
4679         INIT_LIST_HEAD(&proc->delivered_death);
4680         INIT_LIST_HEAD(&proc->waiting_threads);
4681         filp->private_data = proc;
4682
4683         mutex_lock(&binder_procs_lock);
4684         hlist_add_head(&proc->proc_node, &binder_procs);
4685         mutex_unlock(&binder_procs_lock);
4686
4687         if (binder_debugfs_dir_entry_proc) {
4688                 char strbuf[11];
4689
4690                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4691                 /*
4692                  * proc debug entries are shared between contexts, so
4693                  * this will fail if the process tries to open the driver
4694                  * again with a different context. The printing code will
4695                  * anyway print all contexts that a given PID has, so this
4696                  * is not a problem.
4697                  */
4698                 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4699                         binder_debugfs_dir_entry_proc,
4700                         (void *)(unsigned long)proc->pid,
4701                         &binder_proc_fops);
4702         }
4703
4704         return 0;
4705 }
4706
4707 static int binder_flush(struct file *filp, fl_owner_t id)
4708 {
4709         struct binder_proc *proc = filp->private_data;
4710
4711         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4712
4713         return 0;
4714 }
4715
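/**
 * binder_deferred_flush() - force all waiting threads back to user space
 * @proc: process being flushed
 *
 * Sets looper_need_return on every thread and wakes any that are
 * currently waiting so a flush can complete.
 */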
4716 static void binder_deferred_flush(struct binder_proc *proc)
4717 {
4718         struct rb_node *n;
4719         int wake_count = 0;
4720
4721         binder_inner_proc_lock(proc);
4722         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4723                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4724
4725                 thread->looper_need_return = true;
4726                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4727                         wake_up_interruptible(&thread->wait);
4728                         wake_count++;
4729                 }
4730         }
4731         binder_inner_proc_unlock(proc);
4732
4733         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4734                      "binder_flush: %d woke %d threads\n", proc->pid,
4735                      wake_count);
4736 }
4737
4738 static int binder_release(struct inode *nodp, struct file *filp)
4739 {
4740         struct binder_proc *proc = filp->private_data;
4741
4742         debugfs_remove(proc->debugfs_entry);
4743         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4744
4745         return 0;
4746 }
4747
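/**
 * binder_node_release() - release a node of a dying process
 * @node: node to release
 * @refs: running count of incoming references
 *
 * Frees the node outright if nothing else references it; otherwise
 * moves it to the global dead-nodes list and queues BR_DEAD_BINDER
 * work for every process that requested a death notification.
 *
 * Return: the updated incoming reference count.
 */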
4748 static int binder_node_release(struct binder_node *node, int refs)
4749 {
4750         struct binder_ref *ref;
4751         int death = 0;
4752         struct binder_proc *proc = node->proc;
4753
4754         binder_release_work(proc, &node->async_todo);
4755
4756         binder_node_lock(node);
4757         binder_inner_proc_lock(proc);
4758         binder_dequeue_work_ilocked(&node->work);
4759         /*
4760          * The caller must have taken a temporary ref on the node.
4761          */
4762         BUG_ON(!node->tmp_refs);
4763         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4764                 binder_inner_proc_unlock(proc);
4765                 binder_node_unlock(node);
4766                 binder_free_node(node);
4767
4768                 return refs;
4769         }
4770
4771         node->proc = NULL;
4772         node->local_strong_refs = 0;
4773         node->local_weak_refs = 0;
4774         binder_inner_proc_unlock(proc);
4775
4776         spin_lock(&binder_dead_nodes_lock);
4777         hlist_add_head(&node->dead_node, &binder_dead_nodes);
4778         spin_unlock(&binder_dead_nodes_lock);
4779
4780         hlist_for_each_entry(ref, &node->refs, node_entry) {
4781                 refs++;
4782                 /*
4783                  * Need the node lock to synchronize
4784                  * with new notification requests and the
4785                  * inner lock to synchronize with queued
4786                  * death notifications.
4787                  */
4788                 binder_inner_proc_lock(ref->proc);
4789                 if (!ref->death) {
4790                         binder_inner_proc_unlock(ref->proc);
4791                         continue;
4792                 }
4793
4794                 death++;
4795
4796                 BUG_ON(!list_empty(&ref->death->work.entry));
4797                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4798                 binder_enqueue_work_ilocked(&ref->death->work,
4799                                             &ref->proc->todo);
4800                 binder_wakeup_proc_ilocked(ref->proc);
4801                 binder_inner_proc_unlock(ref->proc);
4802         }
4803
4804         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4805                      "node %d now dead, refs %d, death %d\n",
4806                      node->debug_id, refs, death);
4807         binder_node_unlock(node);
4808         binder_put_node(node);
4809
4810         return refs;
4811 }
4812
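/**
 * binder_deferred_release() - final teardown of a binder_proc
 * @proc: process to release
 *
 * Releases all threads, nodes and references owned by @proc, clears
 * the context manager if it belonged to @proc, flushes the remaining
 * work lists, and drops the temporary reference taken here so the
 * proc can be freed once all other references are gone.
 */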
4813 static void binder_deferred_release(struct binder_proc *proc)
4814 {
4815         struct binder_context *context = proc->context;
4816         struct rb_node *n;
4817         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4818
4819         BUG_ON(proc->files);
4820
4821         mutex_lock(&binder_procs_lock);
4822         hlist_del(&proc->proc_node);
4823         mutex_unlock(&binder_procs_lock);
4824
4825         mutex_lock(&context->context_mgr_node_lock);
4826         if (context->binder_context_mgr_node &&
4827             context->binder_context_mgr_node->proc == proc) {
4828                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4829                              "%s: %d context_mgr_node gone\n",
4830                              __func__, proc->pid);
4831                 context->binder_context_mgr_node = NULL;
4832         }
4833         mutex_unlock(&context->context_mgr_node_lock);
4834         binder_inner_proc_lock(proc);
4835         /*
4836          * Make sure proc stays alive after we
4837          * remove all the threads
4838          */
4839         proc->tmp_ref++;
4840
4841         proc->is_dead = true;
4842         threads = 0;
4843         active_transactions = 0;
4844         while ((n = rb_first(&proc->threads))) {
4845                 struct binder_thread *thread;
4846
4847                 thread = rb_entry(n, struct binder_thread, rb_node);
4848                 binder_inner_proc_unlock(proc);
4849                 threads++;
4850                 active_transactions += binder_thread_release(proc, thread);
4851                 binder_inner_proc_lock(proc);
4852         }
4853
4854         nodes = 0;
4855         incoming_refs = 0;
4856         while ((n = rb_first(&proc->nodes))) {
4857                 struct binder_node *node;
4858
4859                 node = rb_entry(n, struct binder_node, rb_node);
4860                 nodes++;
4861                 /*
4862                  * take a temporary ref on the node before
4863                  * calling binder_node_release() which will either
4864                  * kfree() the node or call binder_put_node()
4865                  */
4866                 binder_inc_node_tmpref_ilocked(node);
4867                 rb_erase(&node->rb_node, &proc->nodes);
4868                 binder_inner_proc_unlock(proc);
4869                 incoming_refs = binder_node_release(node, incoming_refs);
4870                 binder_inner_proc_lock(proc);
4871         }
4872         binder_inner_proc_unlock(proc);
4873
4874         outgoing_refs = 0;
4875         binder_proc_lock(proc);
4876         while ((n = rb_first(&proc->refs_by_desc))) {
4877                 struct binder_ref *ref;
4878
4879                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4880                 outgoing_refs++;
4881                 binder_cleanup_ref_olocked(ref);
4882                 binder_proc_unlock(proc);
4883                 binder_free_ref(ref);
4884                 binder_proc_lock(proc);
4885         }
4886         binder_proc_unlock(proc);
4887
4888         binder_release_work(proc, &proc->todo);
4889         binder_release_work(proc, &proc->delivered_death);
4890
4891         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4892                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4893                      __func__, proc->pid, threads, nodes, incoming_refs,
4894                      outgoing_refs, active_transactions);
4895
4896         binder_proc_dec_tmpref(proc);
4897 }
4898
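/**
 * binder_deferred_func() - workqueue handler for deferred binder work
 * @work: the global binder_deferred_work item (not used directly)
 *
 * Drains binder_deferred_list, performing the deferred put-files,
 * flush and release operations for each queued proc.
 */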
4899 static void binder_deferred_func(struct work_struct *work)
4900 {
4901         struct binder_proc *proc;
4902         struct files_struct *files;
4903
4904         int defer;
4905
4906         do {
4907                 mutex_lock(&binder_deferred_lock);
4908                 if (!hlist_empty(&binder_deferred_list)) {
4909                         proc = hlist_entry(binder_deferred_list.first,
4910                                         struct binder_proc, deferred_work_node);
4911                         hlist_del_init(&proc->deferred_work_node);
4912                         defer = proc->deferred_work;
4913                         proc->deferred_work = 0;
4914                 } else {
4915                         proc = NULL;
4916                         defer = 0;
4917                 }
4918                 mutex_unlock(&binder_deferred_lock);
4919
4920                 files = NULL;
4921                 if (defer & BINDER_DEFERRED_PUT_FILES) {
4922                         mutex_lock(&proc->files_lock);
4923                         files = proc->files;
4924                         if (files)
4925                                 proc->files = NULL;
4926                         mutex_unlock(&proc->files_lock);
4927                 }
4928
4929                 if (defer & BINDER_DEFERRED_FLUSH)
4930                         binder_deferred_flush(proc);
4931
4932                 if (defer & BINDER_DEFERRED_RELEASE)
4933                         binder_deferred_release(proc); /* frees proc */
4934
4935                 if (files)
4936                         put_files_struct(files);
4937         } while (proc);
4938 }
4939 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4940
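/**
 * binder_defer_work() - queue deferred work for a proc
 * @proc:  process the work is for
 * @defer: BINDER_DEFERRED_* operation(s) to perform
 *
 * Records @defer for @proc and schedules binder_deferred_work if the
 * proc is not already on the deferred list.
 */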
4941 static void
4942 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4943 {
4944         mutex_lock(&binder_deferred_lock);
4945         proc->deferred_work |= defer;
4946         if (hlist_unhashed(&proc->deferred_work_node)) {
4947                 hlist_add_head(&proc->deferred_work_node,
4948                                 &binder_deferred_list);
4949                 schedule_work(&binder_deferred_work);
4950         }
4951         mutex_unlock(&binder_deferred_lock);
4952 }
4953
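/**
 * print_binder_transaction_ilocked() - dump one transaction to debugfs
 * @m:      seq_file to print to
 * @proc:   proc whose inner lock is held by the caller
 * @prefix: string printed before the transaction line
 * @t:      transaction to print
 *
 * Buffer details are only printed when @proc is the transaction's
 * target proc, since only then does the held inner lock protect the
 * buffer.
 */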
4954 static void print_binder_transaction_ilocked(struct seq_file *m,
4955                                              struct binder_proc *proc,
4956                                              const char *prefix,
4957                                              struct binder_transaction *t)
4958 {
4959         struct binder_proc *to_proc;
4960         struct binder_buffer *buffer = t->buffer;
4961
4962         spin_lock(&t->lock);
4963         to_proc = t->to_proc;
4964         seq_printf(m,
4965                    "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4966                    prefix, t->debug_id, t,
4967                    t->from ? t->from->proc->pid : 0,
4968                    t->from ? t->from->pid : 0,
4969                    to_proc ? to_proc->pid : 0,
4970                    t->to_thread ? t->to_thread->pid : 0,
4971                    t->code, t->flags, t->priority, t->need_reply);
4972         spin_unlock(&t->lock);
4973
4974         if (proc != to_proc) {
4975                 /*
4976                  * Can only safely deref buffer if we are holding the
4977                  * correct proc inner lock for this node
4978                  */
4979                 seq_puts(m, "\n");
4980                 return;
4981         }
4982
4983         if (buffer == NULL) {
4984                 seq_puts(m, " buffer free\n");
4985                 return;
4986         }
4987         if (buffer->target_node)
4988                 seq_printf(m, " node %d", buffer->target_node->debug_id);
4989         seq_printf(m, " size %zd:%zd data %p\n",
4990                    buffer->data_size, buffer->offsets_size,
4991                    buffer->data);
4992 }
4993
static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

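/*
 * Dump one thread: looper state, transaction stack and todo list.
 * If @print_always is zero and nothing was emitted past the header,
 * rewind m->count so idle threads produce no output at all.
 */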
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

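/*
 * Dump one node along with the pid of every proc holding a ref on it.
 * Caller must hold node->lock and, while node->proc is still set, that
 * proc's inner lock as well (needed to walk node->async_todo safely).
 */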
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}

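/*
 * Dump one reference; refs whose node's owning proc has died are
 * flagged "dead ". Caller holds the ref owner's outer lock; the node
 * lock is taken here so the node fields can be read safely.
 */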
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

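/*
 * Dump the full state of one proc: threads, nodes, refs (only when
 * @print_all is set), allocated buffers and pending work. The node
 * lock must be taken before the inner lock, so each node is pinned
 * with a temporary reference and the inner lock dropped while the
 * node is printed. If nothing past the header was printed and
 * @print_all is not set, the output is rewound to suppress the proc
 * entirely.
 *
 * Output shape (values illustrative, not from a real trace):
 *
 *   proc 1234
 *   context binder
 *     thread 1234: l 12 need_return 0 tr 0
 *     node 42: u00000000aabbccdd c00000000011223344 hs 1 hw 1 ...
 *     ref 7: desc 1 node 9 s 1 w 1 d 0000000000000000
 */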
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

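/*
 * Human-readable names for the BR_ return codes, the BC_ command
 * codes and the object-stat slots. Each array must stay in the same
 * order as the enum it mirrors; print_binder_stats() below at least
 * enforces matching sizes with BUILD_BUG_ON().
 */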
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

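/*
 * Dump every non-zero command, return and object counter from @stats.
 * Object stats report both the live count (created - deleted) and the
 * total ever created.
 */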
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				prefix,
				binder_objstat_strings[i],
				created - deleted,
				created);
	}
}

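/*
 * Dump per-proc summary statistics: thread, node, ref and buffer
 * counts, allocator page state and the number of transactions still
 * pending on the proc's todo list.
 */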
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

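/*
 * Print one transaction-log entry. Entries are written without
 * locking: the writer clears e->debug_id_done, fills in the fields,
 * then stores the debug id back into e->debug_id_done. Here the
 * marker is read before and after printing, with read barriers
 * pairing against the writer's write barriers; if it is zero or has
 * changed in between, the entry was mid-update and gets flagged
 * " (incomplete)".
 */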
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done is read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read barrier to guarantee debug_id_done is re-read only after
	 * we are done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

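/*
 * Dump the transaction log, oldest entry first. log->entry is a ring
 * buffer whose cursor log->cur only ever increments, so:
 *
 *   - before the first wrap (!log->full), slots 0..log_cur are valid
 *     and are printed starting from slot 0;
 *   - after a wrap, every slot is valid and printing starts at
 *     (log_cur + 1) % ARRAY_SIZE(log->entry), the oldest slot.
 *
 * Worked example: with 32 slots and log_cur == 40, cur becomes
 * 41 % 32 = 9 and count becomes 32, so slots 9..31 then 0..8 are
 * printed, i.e. entries 9..40 in write order.
 */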
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

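/*
 * Allocate and register one binder misc device (visible as
 * /dev/<name>) and add it to the global binder_devices list. Each
 * device gets its own binder_context, so the context manager is
 * per-device.
 */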
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

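/*
 * Module init: set up the buffer-allocator shrinker, the debugfs tree
 * (state, stats, transactions and the two transaction logs) and one
 * misc device per comma-separated name in binder_devices_param. The
 * log cursors start at ~0U so the first entry added wraps to slot 0.
 * On failure, any devices already registered are torn down again.
 */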
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");