1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock to be held on entry indicate the
42  * required lock in the suffix of the function name:
43  *
44  * foo_olocked() : requires proc->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
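
/*
 * Illustrative sketch (editorial, not part of the driver): taking the
 * locks in the documented order for a node owned by a live proc. The
 * helper names are the ones defined later in this file; "proc" and
 * "node" are hypothetical locals.
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	... touch refs, node fields and todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */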
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched/signal.h>
68 #include <linux/sched/mm.h>
69 #include <linux/seq_file.h>
70 #include <linux/uaccess.h>
71 #include <linux/pid_namespace.h>
72 #include <linux/security.h>
73 #include <linux/spinlock.h>
74
75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
76 #define BINDER_IPC_32BIT 1
77 #endif
78
79 #include <uapi/linux/android/binder.h>
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96
97 #define BINDER_DEBUG_ENTRY(name) \
98 static int binder_##name##_open(struct inode *inode, struct file *file) \
99 { \
100         return single_open(file, binder_##name##_show, inode->i_private); \
101 } \
102 \
103 static const struct file_operations binder_##name##_fops = { \
104         .owner = THIS_MODULE, \
105         .open = binder_##name##_open, \
106         .read = seq_read, \
107         .llseek = seq_lseek, \
108         .release = single_release, \
109 }
110
111 static int binder_proc_show(struct seq_file *m, void *unused);
112 BINDER_DEBUG_ENTRY(proc);
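
/*
 * For reference, BINDER_DEBUG_ENTRY(proc) expands (token-pasting
 * "proc" into the names) to:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = binder_proc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 */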
113
114 /* SZ_1K and SZ_4M are only defined in include/asm-arm/sizes.h */
115 #ifndef SZ_1K
116 #define SZ_1K                               0x400
117 #endif
118
119 #ifndef SZ_4M
120 #define SZ_4M                               0x400000
121 #endif
122
123 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
124
125 enum {
126         BINDER_DEBUG_USER_ERROR             = 1U << 0,
127         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
128         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
129         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
130         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
131         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
132         BINDER_DEBUG_READ_WRITE             = 1U << 6,
133         BINDER_DEBUG_USER_REFS              = 1U << 7,
134         BINDER_DEBUG_THREADS                = 1U << 8,
135         BINDER_DEBUG_TRANSACTION            = 1U << 9,
136         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
137         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
138         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
139         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
140         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
141 };
142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
143         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
144 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
145
146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
147 module_param_named(devices, binder_devices_param, charp, 0444);
148
149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
150 static int binder_stop_on_user_error;
151
152 static int binder_set_stop_on_user_error(const char *val,
153                                          struct kernel_param *kp)
154 {
155         int ret;
156
157         ret = param_set_int(val, kp);
158         if (binder_stop_on_user_error < 2)
159                 wake_up(&binder_user_error_wait);
160         return ret;
161 }
162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
163         param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
164
165 #define binder_debug(mask, x...) \
166         do { \
167                 if (binder_debug_mask & mask) \
168                         pr_info(x); \
169         } while (0)
170
171 #define binder_user_error(x...) \
172         do { \
173                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
174                         pr_info(x); \
175                 if (binder_stop_on_user_error) \
176                         binder_stop_on_user_error = 2; \
177         } while (0)
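
/*
 * Usage note (illustrative): a typical call looks like
 *
 *	binder_user_error("%d: got transaction with invalid handle\n",
 *			  proc->pid);
 *
 * which logs only when BINDER_DEBUG_USER_ERROR is set in debug_mask
 * (it is by default) and, if the stop_on_user_error parameter is
 * non-zero, raises it to 2; binder_set_stop_on_user_error() above
 * wakes binder_user_error_wait waiters once the parameter is written
 * back below 2.
 */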
178
179 #define to_flat_binder_object(hdr) \
180         container_of(hdr, struct flat_binder_object, hdr)
181
182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
183
184 #define to_binder_buffer_object(hdr) \
185         container_of(hdr, struct binder_buffer_object, hdr)
186
187 #define to_binder_fd_array_object(hdr) \
188         container_of(hdr, struct binder_fd_array_object, hdr)
189
190 enum binder_stat_types {
191         BINDER_STAT_PROC,
192         BINDER_STAT_THREAD,
193         BINDER_STAT_NODE,
194         BINDER_STAT_REF,
195         BINDER_STAT_DEATH,
196         BINDER_STAT_TRANSACTION,
197         BINDER_STAT_TRANSACTION_COMPLETE,
198         BINDER_STAT_COUNT
199 };
200
201 struct binder_stats {
202         atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
203         atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
204         atomic_t obj_created[BINDER_STAT_COUNT];
205         atomic_t obj_deleted[BINDER_STAT_COUNT];
206 };
207
208 static struct binder_stats binder_stats;
209
210 static inline void binder_stats_deleted(enum binder_stat_types type)
211 {
212         atomic_inc(&binder_stats.obj_deleted[type]);
213 }
214
215 static inline void binder_stats_created(enum binder_stat_types type)
216 {
217         atomic_inc(&binder_stats.obj_created[type]);
218 }
219
220 struct binder_transaction_log_entry {
221         int debug_id;
222         int debug_id_done;
223         int call_type;
224         int from_proc;
225         int from_thread;
226         int target_handle;
227         int to_proc;
228         int to_thread;
229         int to_node;
230         int data_size;
231         int offsets_size;
232         int return_error_line;
233         uint32_t return_error;
234         uint32_t return_error_param;
235         const char *context_name;
236 };
237 struct binder_transaction_log {
238         atomic_t cur;
239         bool full;
240         struct binder_transaction_log_entry entry[32];
241 };
242 static struct binder_transaction_log binder_transaction_log;
243 static struct binder_transaction_log binder_transaction_log_failed;
244
245 static struct binder_transaction_log_entry *binder_transaction_log_add(
246         struct binder_transaction_log *log)
247 {
248         struct binder_transaction_log_entry *e;
249         unsigned int cur = atomic_inc_return(&log->cur);
250
251         if (cur >= ARRAY_SIZE(log->entry))
252                 log->full = true;
253         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
254         WRITE_ONCE(e->debug_id_done, 0);
255         /*
256          * write-barrier to synchronize access to e->debug_id_done.
257          * We make sure the initialized 0 value is seen before
258          * the other fields are zeroed by memset().
259          */
260         smp_wmb();
261         memset(e, 0, sizeof(*e));
262         return e;
263 }
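
/*
 * Sketch of a matching reader (illustrative only): the smp_wmb()
 * above pairs with a read barrier on the consumer side, which
 * re-checks debug_id_done to detect an entry that was overwritten
 * while being copied. "e" and "id" are hypothetical locals.
 *
 *	int id = READ_ONCE(e->debug_id_done);
 *	struct binder_transaction_log_entry copy = *e;
 *
 *	smp_rmb();
 *	if (id && id == READ_ONCE(e->debug_id_done))
 *		... "copy" is a consistent snapshot ...
 */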
264
265 struct binder_context {
266         struct binder_node *binder_context_mgr_node;
267         struct mutex context_mgr_node_lock;
268
269         kuid_t binder_context_mgr_uid;
270         const char *name;
271 };
272
273 struct binder_device {
274         struct hlist_node hlist;
275         struct miscdevice miscdev;
276         struct binder_context context;
277 };
278
279 /**
280  * struct binder_work - work enqueued on a worklist
281  * @entry:             node enqueued on list
282  * @type:              type of work to be performed
283  *
284  * There are separate work lists for proc, thread, and node (async).
285  */
286 struct binder_work {
287         struct list_head entry;
288
289         enum {
290                 BINDER_WORK_TRANSACTION = 1,
291                 BINDER_WORK_TRANSACTION_COMPLETE,
292                 BINDER_WORK_RETURN_ERROR,
293                 BINDER_WORK_NODE,
294                 BINDER_WORK_DEAD_BINDER,
295                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
296                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
297         } type;
298 };
299
300 struct binder_error {
301         struct binder_work work;
302         uint32_t cmd;
303 };
304
305 /**
306  * struct binder_node - binder node bookkeeping
307  * @debug_id:             unique ID for debugging
308  *                        (invariant after initialized)
309  * @lock:                 lock for node fields
310  * @work:                 worklist element for node work
311  *                        (protected by @proc->inner_lock)
312  * @rb_node:              element for proc->nodes tree
313  *                        (protected by @proc->inner_lock)
314  * @dead_node:            element for binder_dead_nodes list
315  *                        (protected by binder_dead_nodes_lock)
316  * @proc:                 binder_proc that owns this node
317  *                        (invariant after initialized)
318  * @refs:                 list of references on this node
319  *                        (protected by @lock)
320  * @internal_strong_refs: used to take strong references when
321  *                        initiating a transaction
322  *                        (protected by @proc->inner_lock if @proc
323  *                        and by @lock)
324  * @local_weak_refs:      weak user refs from local process
325  *                        (protected by @proc->inner_lock if @proc
326  *                        and by @lock)
327  * @local_strong_refs:    strong user refs from local process
328  *                        (protected by @proc->inner_lock if @proc
329  *                        and by @lock)
330  * @tmp_refs:             temporary kernel refs
331  *                        (protected by @proc->inner_lock while @proc
332  *                        is valid, and by binder_dead_nodes_lock
333  *                        if @proc is NULL. During inc/dec and node release
334  *                        it is also protected by @lock to provide safety
335  *                        as the node dies and @proc becomes NULL)
336  * @ptr:                  userspace pointer for node
337  *                        (invariant, no lock needed)
338  * @cookie:               userspace cookie for node
339  *                        (invariant, no lock needed)
340  * @has_strong_ref:       userspace notified of strong ref
341  *                        (protected by @proc->inner_lock if @proc
342  *                        and by @lock)
343  * @pending_strong_ref:   userspace has acked notification of strong ref
344  *                        (protected by @proc->inner_lock if @proc
345  *                        and by @lock)
346  * @has_weak_ref:         userspace notified of weak ref
347  *                        (protected by @proc->inner_lock if @proc
348  *                        and by @lock)
349  * @pending_weak_ref:     userspace has acked notification of weak ref
350  *                        (protected by @proc->inner_lock if @proc
351  *                        and by @lock)
352  * @has_async_transaction: async transaction to node in progress
353  *                        (protected by @lock)
354  * @accept_fds:           file descriptor operations supported for node
355  *                        (invariant after initialized)
356  * @min_priority:         minimum scheduling priority
357  *                        (invariant after initialized)
358  * @async_todo:           list of async work items
359  *                        (protected by @proc->inner_lock)
360  *
361  * Bookkeeping structure for binder nodes.
362  */
363 struct binder_node {
364         int debug_id;
365         spinlock_t lock;
366         struct binder_work work;
367         union {
368                 struct rb_node rb_node;
369                 struct hlist_node dead_node;
370         };
371         struct binder_proc *proc;
372         struct hlist_head refs;
373         int internal_strong_refs;
374         int local_weak_refs;
375         int local_strong_refs;
376         int tmp_refs;
377         binder_uintptr_t ptr;
378         binder_uintptr_t cookie;
379         struct {
380                 /*
381                  * bitfield elements protected by
382                  * proc inner_lock
383                  */
384                 u8 has_strong_ref:1;
385                 u8 pending_strong_ref:1;
386                 u8 has_weak_ref:1;
387                 u8 pending_weak_ref:1;
388         };
389         struct {
390                 /*
391                  * invariant after initialization
392                  */
393                 u8 accept_fds:1;
394                 u8 min_priority;
395         };
396         bool has_async_transaction;
397         struct list_head async_todo;
398 };
399
400 struct binder_ref_death {
401         /**
402          * @work: worklist element for death notifications
403          *        (protected by inner_lock of the proc that
404          *        this ref belongs to)
405          */
406         struct binder_work work;
407         binder_uintptr_t cookie;
408 };
409
410 /**
411  * struct binder_ref_data - binder_ref counts and id
412  * @debug_id:        unique ID for the ref
413  * @desc:            unique userspace handle for ref
414  * @strong:          strong ref count (debugging only if not locked)
415  * @weak:            weak ref count (debugging only if not locked)
416  *
417  * Structure to hold ref count and ref id information. Since
418  * the actual ref can only be accessed with a lock, this structure
419  * is used to return information about the ref to callers of
420  * ref inc/dec functions.
421  */
422 struct binder_ref_data {
423         int debug_id;
424         uint32_t desc;
425         int strong;
426         int weak;
427 };
428
429 /**
430  * struct binder_ref - struct to track references on nodes
431  * @data:        binder_ref_data containing id, handle, and current refcounts
432  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433  * @rb_node_node: node for lookup by @node in proc's rb_tree
434  * @node_entry:  list entry for node->refs list in target node
435  *               (protected by @node->lock)
436  * @proc:        binder_proc containing ref
437  * @node:        binder_node of target node. When cleaning up a
438  *               ref for deletion in binder_cleanup_ref, a non-NULL
439  *               @node indicates the node must be freed
440  * @death:       pointer to death notification (ref_death) if requested
441  *               (protected by @node->lock)
442  *
443  * Structure to track references from procA to target node (on procB). This
444  * structure is unsafe to access without holding @proc->outer_lock.
445  */
446 struct binder_ref {
447         /* Lookups needed: */
448         /*   node + proc => ref (transaction) */
449         /*   desc + proc => ref (transaction, inc/dec ref) */
450         /*   node => refs + procs (proc exit) */
451         struct binder_ref_data data;
452         struct rb_node rb_node_desc;
453         struct rb_node rb_node_node;
454         struct hlist_node node_entry;
455         struct binder_proc *proc;
456         struct binder_node *node;
457         struct binder_ref_death *death;
458 };
459
460 enum binder_deferred_state {
461         BINDER_DEFERRED_PUT_FILES    = 0x01,
462         BINDER_DEFERRED_FLUSH        = 0x02,
463         BINDER_DEFERRED_RELEASE      = 0x04,
464 };
465
466 /**
467  * struct binder_proc - binder process bookkeeping
468  * @proc_node:            element for binder_procs list
469  * @threads:              rbtree of binder_threads in this proc
470  *                        (protected by @inner_lock)
471  * @nodes:                rbtree of binder nodes associated with
472  *                        this proc ordered by node->ptr
473  *                        (protected by @inner_lock)
474  * @refs_by_desc:         rbtree of refs ordered by ref->desc
475  *                        (protected by @outer_lock)
476  * @refs_by_node:         rbtree of refs ordered by ref->node
477  *                        (protected by @outer_lock)
478  * @waiting_threads:      threads currently waiting for proc work
479  *                        (protected by @inner_lock)
480  * @pid:                  PID of group_leader of process
481  *                        (invariant after initialized)
482  * @tsk:                  task_struct for group_leader of process
483  *                        (invariant after initialized)
484  * @files:                files_struct for process
485  *                        (invariant after initialized)
486  * @deferred_work_node:   element for binder_deferred_list
487  *                        (protected by binder_deferred_lock)
488  * @deferred_work:        bitmap of deferred work to perform
489  *                        (protected by binder_deferred_lock)
490  * @is_dead:              process is dead and awaiting free
491  *                        when outstanding transactions are cleaned up
492  *                        (protected by @inner_lock)
493  * @todo:                 list of work for this process
494  *                        (protected by @inner_lock)
495  * @wait:                 wait queue head to wait for proc work
496  *                        (invariant after initialized)
497  * @stats:                per-process binder statistics
498  *                        (atomics, no lock needed)
499  * @delivered_death:      list of delivered death notifications
500  *                        (protected by @inner_lock)
501  * @max_threads:          cap on number of binder threads
502  *                        (protected by @inner_lock)
503  * @requested_threads:    number of binder threads requested but not
504  *                        yet started. In current implementation, can
505  *                        only be 0 or 1.
506  *                        (protected by @inner_lock)
507  * @requested_threads_started: number of binder threads started
508  *                        (protected by @inner_lock)
509  * @tmp_ref:              temporary reference to indicate proc is in use
510  *                        (protected by @inner_lock)
511  * @default_priority:     default scheduler priority
512  *                        (invariant after initialized)
513  * @debugfs_entry:        debugfs node
514  * @alloc:                binder allocator bookkeeping
515  * @context:              binder_context for this proc
516  *                        (invariant after initialized)
517  * @inner_lock:           can nest under outer_lock and/or node lock
518  * @outer_lock:           no nesting under inner or node lock
519  *                        Lock order: 1) outer, 2) node, 3) inner
520  *
521  * Bookkeeping structure for binder processes
522  */
523 struct binder_proc {
524         struct hlist_node proc_node;
525         struct rb_root threads;
526         struct rb_root nodes;
527         struct rb_root refs_by_desc;
528         struct rb_root refs_by_node;
529         struct list_head waiting_threads;
530         int pid;
531         struct task_struct *tsk;
532         struct files_struct *files;
533         struct hlist_node deferred_work_node;
534         int deferred_work;
535         bool is_dead;
536
537         struct list_head todo;
538         wait_queue_head_t wait;
539         struct binder_stats stats;
540         struct list_head delivered_death;
541         int max_threads;
542         int requested_threads;
543         int requested_threads_started;
544         int tmp_ref;
545         long default_priority;
546         struct dentry *debugfs_entry;
547         struct binder_alloc alloc;
548         struct binder_context *context;
549         spinlock_t inner_lock;
550         spinlock_t outer_lock;
551 };
552
553 enum {
554         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
555         BINDER_LOOPER_STATE_ENTERED     = 0x02,
556         BINDER_LOOPER_STATE_EXITED      = 0x04,
557         BINDER_LOOPER_STATE_INVALID     = 0x08,
558         BINDER_LOOPER_STATE_WAITING     = 0x10,
559         BINDER_LOOPER_STATE_POLL        = 0x20,
560 };
561
562 /**
563  * struct binder_thread - binder thread bookkeeping
564  * @proc:                 binder process for this thread
565  *                        (invariant after initialization)
566  * @rb_node:              element for proc->threads rbtree
567  *                        (protected by @proc->inner_lock)
568  * @waiting_thread_node:  element for @proc->waiting_threads list
569  *                        (protected by @proc->inner_lock)
570  * @pid:                  PID for this thread
571  *                        (invariant after initialization)
572  * @looper:               bitmap of looping state
573  *                        (only accessed by this thread)
574  * @looper_need_return:   looping thread needs to exit driver
575  *                        (no lock needed)
576  * @transaction_stack:    stack of in-progress transactions for this thread
577  *                        (protected by @proc->inner_lock)
578  * @todo:                 list of work to do for this thread
579  *                        (protected by @proc->inner_lock)
580  * @return_error:         transaction errors reported by this thread
581  *                        (only accessed by this thread)
582  * @reply_error:          transaction errors reported by target thread
583  *                        (protected by @proc->inner_lock)
584  * @wait:                 wait queue for thread work
585  * @stats:                per-thread statistics
586  *                        (atomics, no lock needed)
587  * @tmp_ref:              temporary reference to indicate thread is in use
588  *                        (atomic since @proc->inner_lock cannot
589  *                        always be acquired)
590  * @is_dead:              thread is dead and awaiting free
591  *                        when outstanding transactions are cleaned up
592  *                        (protected by @proc->inner_lock)
593  *
594  * Bookkeeping structure for binder threads.
595  */
596 struct binder_thread {
597         struct binder_proc *proc;
598         struct rb_node rb_node;
599         struct list_head waiting_thread_node;
600         int pid;
601         int looper;              /* only modified by this thread */
602         bool looper_need_return; /* can be written by other thread */
603         struct binder_transaction *transaction_stack;
604         struct list_head todo;
605         struct binder_error return_error;
606         struct binder_error reply_error;
607         wait_queue_head_t wait;
608         struct binder_stats stats;
609         atomic_t tmp_ref;
610         bool is_dead;
611 };
612
613 struct binder_transaction {
614         int debug_id;
615         struct binder_work work;
616         struct binder_thread *from;
617         struct binder_transaction *from_parent;
618         struct binder_proc *to_proc;
619         struct binder_thread *to_thread;
620         struct binder_transaction *to_parent;
621         unsigned need_reply:1;
622         /* unsigned is_dead:1; */       /* not used at the moment */
623
624         struct binder_buffer *buffer;
625         unsigned int    code;
626         unsigned int    flags;
627         long    priority;
628         long    saved_priority;
629         kuid_t  sender_euid;
630         /**
631          * @lock:  protects @from, @to_proc, and @to_thread
632          *
633          * @from, @to_proc, and @to_thread can be set to NULL
634          * during thread teardown
635          */
636         spinlock_t lock;
637 };
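
/*
 * Illustrative sketch (hypothetical caller): per the @lock comment
 * above, @from can become NULL during thread teardown, so readers
 * snapshot it under t->lock.
 *
 *	struct binder_thread *from;
 *
 *	spin_lock(&t->lock);
 *	from = t->from;
 *	... use "from" (possibly NULL) while t->lock is held ...
 *	spin_unlock(&t->lock);
 */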
638
639 /**
640  * binder_proc_lock() - Acquire outer lock for given binder_proc
641  * @proc:         struct binder_proc to acquire
642  *
643  * Acquires proc->outer_lock. Used to protect binder_ref
644  * structures associated with the given proc.
645  */
646 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
647 static void
648 _binder_proc_lock(struct binder_proc *proc, int line)
649 {
650         binder_debug(BINDER_DEBUG_SPINLOCKS,
651                      "%s: line=%d\n", __func__, line);
652         spin_lock(&proc->outer_lock);
653 }
654
655 /**
656  * binder_proc_unlock() - Release spinlock for given binder_proc
657  * @proc:         struct binder_proc whose outer lock is released
658  *
659  * Release lock acquired via binder_proc_lock()
660  */
661 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
662 static void
663 _binder_proc_unlock(struct binder_proc *proc, int line)
664 {
665         binder_debug(BINDER_DEBUG_SPINLOCKS,
666                      "%s: line=%d\n", __func__, line);
667         spin_unlock(&proc->outer_lock);
668 }
669
670 /**
671  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
672  * @proc:         struct binder_proc to acquire
673  *
674  * Acquires proc->inner_lock. Used to protect todo lists
675  */
676 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
677 static void
678 _binder_inner_proc_lock(struct binder_proc *proc, int line)
679 {
680         binder_debug(BINDER_DEBUG_SPINLOCKS,
681                      "%s: line=%d\n", __func__, line);
682         spin_lock(&proc->inner_lock);
683 }
684
685 /**
686  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
687  * @proc:         struct binder_proc whose inner lock is released
688  *
689  * Release lock acquired via binder_inner_proc_lock()
690  */
691 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
692 static void
693 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
694 {
695         binder_debug(BINDER_DEBUG_SPINLOCKS,
696                      "%s: line=%d\n", __func__, line);
697         spin_unlock(&proc->inner_lock);
698 }
699
700 /**
701  * binder_node_lock() - Acquire spinlock for given binder_node
702  * @node:         struct binder_node to acquire
703  *
704  * Acquires node->lock. Used to protect binder_node fields
705  */
706 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
707 static void
708 _binder_node_lock(struct binder_node *node, int line)
709 {
710         binder_debug(BINDER_DEBUG_SPINLOCKS,
711                      "%s: line=%d\n", __func__, line);
712         spin_lock(&node->lock);
713 }
714
715 /**
716  * binder_node_unlock() - Release spinlock for given binder_node
717  * @node:         struct binder_node whose lock is released
718  *
719  * Release lock acquired via binder_node_lock()
720  */
721 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
722 static void
723 _binder_node_unlock(struct binder_node *node, int line)
724 {
725         binder_debug(BINDER_DEBUG_SPINLOCKS,
726                      "%s: line=%d\n", __func__, line);
727         spin_unlock(&node->lock);
728 }
729
730 /**
731  * binder_node_inner_lock() - Acquire node and inner locks
732  * @node:         struct binder_node to acquire
733  *
734  * Acquires node->lock. If node->proc is non-NULL, also acquires
735  * proc->inner_lock. Used to protect binder_node fields
736  */
737 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
738 static void
739 _binder_node_inner_lock(struct binder_node *node, int line)
740 {
741         binder_debug(BINDER_DEBUG_SPINLOCKS,
742                      "%s: line=%d\n", __func__, line);
743         spin_lock(&node->lock);
744         if (node->proc)
745                 binder_inner_proc_lock(node->proc);
746 }
747
748 /**
749  * binder_node_inner_unlock() - Release node and inner locks
750  * @node:         struct binder_node whose locks are released
751  *
752  * Release locks acquired via binder_node_inner_lock()
753  */
754 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
755 static void
756 _binder_node_inner_unlock(struct binder_node *node, int line)
757 {
758         struct binder_proc *proc = node->proc;
759
760         binder_debug(BINDER_DEBUG_SPINLOCKS,
761                      "%s: line=%d\n", __func__, line);
762         if (proc)
763                 binder_inner_proc_unlock(proc);
764         spin_unlock(&node->lock);
765 }
766
767 static bool binder_worklist_empty_ilocked(struct list_head *list)
768 {
769         return list_empty(list);
770 }
771
772 /**
773  * binder_worklist_empty() - Check if no items on the work list
774  * @proc:       binder_proc associated with list
775  * @list:       list to check
776  *
777  * Return: true if there are no items on list, else false
778  */
779 static bool binder_worklist_empty(struct binder_proc *proc,
780                                   struct list_head *list)
781 {
782         bool ret;
783
784         binder_inner_proc_lock(proc);
785         ret = binder_worklist_empty_ilocked(list);
786         binder_inner_proc_unlock(proc);
787         return ret;
788 }
789
790 static void
791 binder_enqueue_work_ilocked(struct binder_work *work,
792                            struct list_head *target_list)
793 {
794         BUG_ON(target_list == NULL);
795         BUG_ON(work->entry.next && !list_empty(&work->entry));
796         list_add_tail(&work->entry, target_list);
797 }
798
799 /**
800  * binder_enqueue_work() - Add an item to the work list
801  * @proc:         binder_proc associated with list
802  * @work:         struct binder_work to add to list
803  * @target_list:  list to add work to
804  *
805  * Adds the work to the specified list. Asserts that work
806  * is not already on a list.
807  */
808 static void
809 binder_enqueue_work(struct binder_proc *proc,
810                     struct binder_work *work,
811                     struct list_head *target_list)
812 {
813         binder_inner_proc_lock(proc);
814         binder_enqueue_work_ilocked(work, target_list);
815         binder_inner_proc_unlock(proc);
816 }
817
818 static void
819 binder_dequeue_work_ilocked(struct binder_work *work)
820 {
821         list_del_init(&work->entry);
822 }
823
824 /**
825  * binder_dequeue_work() - Removes an item from the work list
826  * @proc:         binder_proc associated with list
827  * @work:         struct binder_work to remove from list
828  *
829  * Removes the specified work item from whatever list it is on.
830  * Can safely be called if work is not on any list.
831  */
832 static void
833 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
834 {
835         binder_inner_proc_lock(proc);
836         binder_dequeue_work_ilocked(work);
837         binder_inner_proc_unlock(proc);
838 }
839
840 static struct binder_work *binder_dequeue_work_head_ilocked(
841                                         struct list_head *list)
842 {
843         struct binder_work *w;
844
845         w = list_first_entry_or_null(list, struct binder_work, entry);
846         if (w)
847                 list_del_init(&w->entry);
848         return w;
849 }
850
851 /**
852  * binder_dequeue_work_head() - Dequeues the item at head of list
853  * @proc:         binder_proc associated with list
854  * @list:         list to dequeue the head of
855  *
856  * Removes the head of the list if there are items on the list
857  *
858  * Return: pointer to dequeued binder_work, NULL if list was empty
859  */
860 static struct binder_work *binder_dequeue_work_head(
861                                         struct binder_proc *proc,
862                                         struct list_head *list)
863 {
864         struct binder_work *w;
865
866         binder_inner_proc_lock(proc);
867         w = binder_dequeue_work_head_ilocked(list);
868         binder_inner_proc_unlock(proc);
869         return w;
870 }
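
/*
 * Illustrative producer sketch (hypothetical caller): queueing work
 * for a specific thread and waking it, combining the helpers above
 * with binder_wakeup_thread_ilocked() defined later in this file
 * (the final argument selects a synchronous wake-up).
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_work_ilocked(&t->work, &thread->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 */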
871
872 static void
873 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
874 static void binder_free_thread(struct binder_thread *thread);
875 static void binder_free_proc(struct binder_proc *proc);
876 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
877
878 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
879 {
880         struct files_struct *files = proc->files;
881         unsigned long rlim_cur;
882         unsigned long irqs;
883
884         if (files == NULL)
885                 return -ESRCH;
886
887         if (!lock_task_sighand(proc->tsk, &irqs))
888                 return -EMFILE;
889
890         rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
891         unlock_task_sighand(proc->tsk, &irqs);
892
893         return __alloc_fd(files, 0, rlim_cur, flags);
894 }
895
896 /*
897  * copied from fd_install
898  */
899 static void task_fd_install(
900         struct binder_proc *proc, unsigned int fd, struct file *file)
901 {
902         if (proc->files)
903                 __fd_install(proc->files, fd, file);
904 }
905
906 /*
907  * copied from sys_close
908  */
909 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
910 {
911         int retval;
912
913         if (proc->files == NULL)
914                 return -ESRCH;
915
916         retval = __close_fd(proc->files, fd);
917         /* can't restart close syscall because file table entry was cleared */
918         if (unlikely(retval == -ERESTARTSYS ||
919                      retval == -ERESTARTNOINTR ||
920                      retval == -ERESTARTNOHAND ||
921                      retval == -ERESTART_RESTARTBLOCK))
922                 retval = -EINTR;
923
924         return retval;
925 }
926
927 static bool binder_has_work_ilocked(struct binder_thread *thread,
928                                     bool do_proc_work)
929 {
930         return !binder_worklist_empty_ilocked(&thread->todo) ||
931                 thread->looper_need_return ||
932                 (do_proc_work &&
933                  !binder_worklist_empty_ilocked(&thread->proc->todo));
934 }
935
936 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
937 {
938         bool has_work;
939
940         binder_inner_proc_lock(thread->proc);
941         has_work = binder_has_work_ilocked(thread, do_proc_work);
942         binder_inner_proc_unlock(thread->proc);
943
944         return has_work;
945 }
946
947 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
948 {
949         return !thread->transaction_stack &&
950                 binder_worklist_empty_ilocked(&thread->todo) &&
951                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
952                                    BINDER_LOOPER_STATE_REGISTERED));
953 }
954
955 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
956                                                bool sync)
957 {
958         struct rb_node *n;
959         struct binder_thread *thread;
960
961         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
962                 thread = rb_entry(n, struct binder_thread, rb_node);
963                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
964                     binder_available_for_proc_work_ilocked(thread)) {
965                         if (sync)
966                                 wake_up_interruptible_sync(&thread->wait);
967                         else
968                                 wake_up_interruptible(&thread->wait);
969                 }
970         }
971 }
972
973 /**
974  * binder_select_thread_ilocked() - selects a thread for doing proc work.
975  * @proc:       process to select a thread from
976  *
977  * Note that calling this function moves the thread off the waiting_threads
978  * list, so it can only be woken up by the caller of this function, or a
979  * signal. Therefore, callers *should* always wake up the thread this function
980  * returns.
981  *
982  * Return:      If there's a thread currently waiting for process work,
983  *              returns that thread. Otherwise returns NULL.
984  */
985 static struct binder_thread *
986 binder_select_thread_ilocked(struct binder_proc *proc)
987 {
988         struct binder_thread *thread;
989
990         assert_spin_locked(&proc->inner_lock);
991         thread = list_first_entry_or_null(&proc->waiting_threads,
992                                           struct binder_thread,
993                                           waiting_thread_node);
994
995         if (thread)
996                 list_del_init(&thread->waiting_thread_node);
997
998         return thread;
999 }
1000
1001 /**
1002  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1003  * @proc:       process to wake up a thread in
1004  * @thread:     specific thread to wake up (may be NULL)
1005  * @sync:       whether to do a synchronous wake-up
1006  *
1007  * This function wakes up a thread in the @proc process.
1008  * The caller may provide a specific thread to wake up in
1009  * the @thread parameter. If @thread is NULL, this function
1010  * will wake up threads that have called poll().
1011  *
1012  * Note that for this function to work as expected, callers
1013  * should first call binder_select_thread() to find a thread
1014  * to handle the work (if they don't have a thread already),
1015  * and pass the result into the @thread parameter.
1016  */
1017 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1018                                          struct binder_thread *thread,
1019                                          bool sync)
1020 {
1021         assert_spin_locked(&proc->inner_lock);
1022
1023         if (thread) {
1024                 if (sync)
1025                         wake_up_interruptible_sync(&thread->wait);
1026                 else
1027                         wake_up_interruptible(&thread->wait);
1028                 return;
1029         }
1030
1031         /* Didn't find a thread waiting for proc work; this can happen
1032          * in two scenarios:
1033          * 1. All threads are busy handling transactions
1034          *    In that case, one of those threads should call back into
1035          *    the kernel driver soon and pick up this work.
1036          * 2. Threads are using the (e)poll interface, in which case
1037          *    they may be blocked on the waitqueue without having been
1038          *    added to waiting_threads. For this case, we just iterate
1039          *    over all threads not handling transaction work, and
1040          *    wake them all up. We wake all because we don't know whether
1041          *    a thread that called into (e)poll is handling non-binder
1042          *    work currently.
1043          */
1044         binder_wakeup_poll_threads_ilocked(proc, sync);
1045 }
1046
1047 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1048 {
1049         struct binder_thread *thread = binder_select_thread_ilocked(proc);
1050
1051         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1052 }
1053
1054 static void binder_set_nice(long nice)
1055 {
1056         long min_nice;
1057
1058         if (can_nice(current, nice)) {
1059                 set_user_nice(current, nice);
1060                 return;
1061         }
1062         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1063         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1064                      "%d: nice value %ld not allowed use %ld instead\n",
1065                       current->pid, nice, min_nice);
1066         set_user_nice(current, min_nice);
1067         if (min_nice <= MAX_NICE)
1068                 return;
1069         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1070 }
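
/*
 * Worked example (numbers assumed): with MAX_NICE == 19 and an
 * RLIMIT_NICE soft limit of 10, rlimit_to_nice(10) == 19 - 10 + 1 ==
 * 10, so a request for nice -5 from a task lacking the privilege to
 * lower its nice value is capped at nice 10 and logged under
 * BINDER_DEBUG_PRIORITY_CAP.
 */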
1071
1072 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1073                                                    binder_uintptr_t ptr)
1074 {
1075         struct rb_node *n = proc->nodes.rb_node;
1076         struct binder_node *node;
1077
1078         assert_spin_locked(&proc->inner_lock);
1079
1080         while (n) {
1081                 node = rb_entry(n, struct binder_node, rb_node);
1082
1083                 if (ptr < node->ptr)
1084                         n = n->rb_left;
1085                 else if (ptr > node->ptr)
1086                         n = n->rb_right;
1087                 else {
1088                         /*
1089                          * take an implicit weak reference
1090                          * to ensure node stays alive until
1091                          * call to binder_put_node()
1092                          */
1093                         binder_inc_node_tmpref_ilocked(node);
1094                         return node;
1095                 }
1096         }
1097         return NULL;
1098 }
1099
1100 static struct binder_node *binder_get_node(struct binder_proc *proc,
1101                                            binder_uintptr_t ptr)
1102 {
1103         struct binder_node *node;
1104
1105         binder_inner_proc_lock(proc);
1106         node = binder_get_node_ilocked(proc, ptr);
1107         binder_inner_proc_unlock(proc);
1108         return node;
1109 }
1110
1111 static struct binder_node *binder_init_node_ilocked(
1112                                                 struct binder_proc *proc,
1113                                                 struct binder_node *new_node,
1114                                                 struct flat_binder_object *fp)
1115 {
1116         struct rb_node **p = &proc->nodes.rb_node;
1117         struct rb_node *parent = NULL;
1118         struct binder_node *node;
1119         binder_uintptr_t ptr = fp ? fp->binder : 0;
1120         binder_uintptr_t cookie = fp ? fp->cookie : 0;
1121         __u32 flags = fp ? fp->flags : 0;
1122
1123         assert_spin_locked(&proc->inner_lock);
1124
1125         while (*p) {
1126
1127                 parent = *p;
1128                 node = rb_entry(parent, struct binder_node, rb_node);
1129
1130                 if (ptr < node->ptr)
1131                         p = &(*p)->rb_left;
1132                 else if (ptr > node->ptr)
1133                         p = &(*p)->rb_right;
1134                 else {
1135                         /*
1136                          * A matching node is already in
1137                          * the rb tree. Abandon the init
1138                          * and return it.
1139                          */
1140                         binder_inc_node_tmpref_ilocked(node);
1141                         return node;
1142                 }
1143         }
1144         node = new_node;
1145         binder_stats_created(BINDER_STAT_NODE);
1146         node->tmp_refs++;
1147         rb_link_node(&node->rb_node, parent, p);
1148         rb_insert_color(&node->rb_node, &proc->nodes);
1149         node->debug_id = atomic_inc_return(&binder_last_id);
1150         node->proc = proc;
1151         node->ptr = ptr;
1152         node->cookie = cookie;
1153         node->work.type = BINDER_WORK_NODE;
1154         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1155         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1156         spin_lock_init(&node->lock);
1157         INIT_LIST_HEAD(&node->work.entry);
1158         INIT_LIST_HEAD(&node->async_todo);
1159         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1160                      "%d:%d node %d u%016llx c%016llx created\n",
1161                      proc->pid, current->pid, node->debug_id,
1162                      (u64)node->ptr, (u64)node->cookie);
1163
1164         return node;
1165 }
1166
1167 static struct binder_node *binder_new_node(struct binder_proc *proc,
1168                                            struct flat_binder_object *fp)
1169 {
1170         struct binder_node *node;
1171         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1172
1173         if (!new_node)
1174                 return NULL;
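        /*
         * The GFP_KERNEL allocation above may sleep, so it is done
         * before taking the non-sleeping inner spinlock; if another
         * thread beat us to inserting the node, the unused allocation
         * is freed below.
         */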
1175         binder_inner_proc_lock(proc);
1176         node = binder_init_node_ilocked(proc, new_node, fp);
1177         binder_inner_proc_unlock(proc);
1178         if (node != new_node)
1179                 /*
1180                  * The node was already added by another thread
1181                  */
1182                 kfree(new_node);
1183
1184         return node;
1185 }
1186
1187 static void binder_free_node(struct binder_node *node)
1188 {
1189         kfree(node);
1190         binder_stats_deleted(BINDER_STAT_NODE);
1191 }
1192
1193 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1194                                     int internal,
1195                                     struct list_head *target_list)
1196 {
1197         struct binder_proc *proc = node->proc;
1198
1199         assert_spin_locked(&node->lock);
1200         if (proc)
1201                 assert_spin_locked(&proc->inner_lock);
1202         if (strong) {
1203                 if (internal) {
1204                         if (target_list == NULL &&
1205                             node->internal_strong_refs == 0 &&
1206                             !(node->proc &&
1207                               node == node->proc->context->binder_context_mgr_node &&
1208                               node->has_strong_ref)) {
1209                                 pr_err("invalid inc strong node for %d\n",
1210                                         node->debug_id);
1211                                 return -EINVAL;
1212                         }
1213                         node->internal_strong_refs++;
1214                 } else
1215                         node->local_strong_refs++;
1216                 if (!node->has_strong_ref && target_list) {
1217                         binder_dequeue_work_ilocked(&node->work);
1218                         binder_enqueue_work_ilocked(&node->work, target_list);
1219                 }
1220         } else {
1221                 if (!internal)
1222                         node->local_weak_refs++;
1223                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1224                         if (target_list == NULL) {
1225                                 pr_err("invalid inc weak node for %d\n",
1226                                         node->debug_id);
1227                                 return -EINVAL;
1228                         }
1229                         binder_enqueue_work_ilocked(&node->work, target_list);
1230                 }
1231         }
1232         return 0;
1233 }
1234
1235 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1236                            struct list_head *target_list)
1237 {
1238         int ret;
1239
1240         binder_node_inner_lock(node);
1241         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1242         binder_node_inner_unlock(node);
1243
1244         return ret;
1245 }
1246
1247 static bool binder_dec_node_nilocked(struct binder_node *node,
1248                                      int strong, int internal)
1249 {
1250         struct binder_proc *proc = node->proc;
1251
1252         assert_spin_locked(&node->lock);
1253         if (proc)
1254                 assert_spin_locked(&proc->inner_lock);
1255         if (strong) {
1256                 if (internal)
1257                         node->internal_strong_refs--;
1258                 else
1259                         node->local_strong_refs--;
1260                 if (node->local_strong_refs || node->internal_strong_refs)
1261                         return false;
1262         } else {
1263                 if (!internal)
1264                         node->local_weak_refs--;
1265                 if (node->local_weak_refs || node->tmp_refs ||
1266                                 !hlist_empty(&node->refs))
1267                         return false;
1268         }
1269
1270         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1271                 if (list_empty(&node->work.entry)) {
1272                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
1273                         binder_wakeup_proc_ilocked(proc);
1274                 }
1275         } else {
1276                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1277                     !node->local_weak_refs && !node->tmp_refs) {
1278                         if (proc) {
1279                                 binder_dequeue_work_ilocked(&node->work);
1280                                 rb_erase(&node->rb_node, &proc->nodes);
1281                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1282                                              "refless node %d deleted\n",
1283                                              node->debug_id);
1284                         } else {
1285                                 BUG_ON(!list_empty(&node->work.entry));
1286                                 spin_lock(&binder_dead_nodes_lock);
1287                                 /*
1288                                  * tmp_refs could have changed so
1289                                  * check it again
1290                                  */
1291                                 if (node->tmp_refs) {
1292                                         spin_unlock(&binder_dead_nodes_lock);
1293                                         return false;
1294                                 }
1295                                 hlist_del(&node->dead_node);
1296                                 spin_unlock(&binder_dead_nodes_lock);
1297                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1298                                              "dead node %d deleted\n",
1299                                              node->debug_id);
1300                         }
1301                         return true;
1302                 }
1303         }
1304         return false;
1305 }
1306
1307 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1308 {
1309         bool free_node;
1310
1311         binder_node_inner_lock(node);
1312         free_node = binder_dec_node_nilocked(node, strong, internal);
1313         binder_node_inner_unlock(node);
1314         if (free_node)
1315                 binder_free_node(node);
1316 }
1317
1318 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1319 {
1320         /*
1321          * No call to binder_inc_node() is needed since we
1322          * don't need to inform userspace of any changes to
1323          * tmp_refs
1324          */
1325         node->tmp_refs++;
1326 }
1327
1328 /**
1329  * binder_inc_node_tmpref() - take a temporary reference on node
1330  * @node:       node to reference
1331  *
1332  * Take reference on node to prevent the node from being freed
1333  * while referenced only by a local variable. The inner lock is
1334  * needed to serialize with the node work on the queue (which
1335  * isn't needed after the node is dead). If the node is dead
1336  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1337  * node->tmp_refs against dead-node-only cases where the node
1338  * lock cannot be acquired (e.g. traversing the dead node list to
1339  * print nodes)
1340  */
1341 static void binder_inc_node_tmpref(struct binder_node *node)
1342 {
1343         binder_node_lock(node);
1344         if (node->proc)
1345                 binder_inner_proc_lock(node->proc);
1346         else
1347                 spin_lock(&binder_dead_nodes_lock);
1348         binder_inc_node_tmpref_ilocked(node);
1349         if (node->proc)
1350                 binder_inner_proc_unlock(node->proc);
1351         else
1352                 spin_unlock(&binder_dead_nodes_lock);
1353         binder_node_unlock(node);
1354 }
1355
1356 /**
1357  * binder_dec_node_tmpref() - remove a temporary reference on node
1358  * @node:       node to reference
1359  *
1360  * Release temporary reference on node taken via binder_inc_node_tmpref()
1361  */
1362 static void binder_dec_node_tmpref(struct binder_node *node)
1363 {
1364         bool free_node;
1365
1366         binder_node_inner_lock(node);
1367         if (!node->proc)
1368                 spin_lock(&binder_dead_nodes_lock);
1369         node->tmp_refs--;
1370         BUG_ON(node->tmp_refs < 0);
1371         if (!node->proc)
1372                 spin_unlock(&binder_dead_nodes_lock);
1373         /*
1374          * Call binder_dec_node() to check if all refcounts are 0
1375          * and cleanup is needed. Calling with strong=0 and internal=1
1376          * causes no actual reference to be released in binder_dec_node().
1377          * If that changes, a change is needed here too.
1378          */
1379         free_node = binder_dec_node_nilocked(node, 0, 1);
1380         binder_node_inner_unlock(node);
1381         if (free_node)
1382                 binder_free_node(node);
1383 }
1384
1385 static void binder_put_node(struct binder_node *node)
1386 {
1387         binder_dec_node_tmpref(node);
1388 }
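
/*
 * Typical get/put pairing (illustrative, hypothetical caller):
 * lookups such as binder_get_node() return with a tmp_ref held,
 * which must be dropped via binder_put_node() once the local
 * pointer is no longer used.
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */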
1389
1390 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1391                                                  u32 desc, bool need_strong_ref)
1392 {
1393         struct rb_node *n = proc->refs_by_desc.rb_node;
1394         struct binder_ref *ref;
1395
1396         while (n) {
1397                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1398
1399                 if (desc < ref->data.desc) {
1400                         n = n->rb_left;
1401                 } else if (desc > ref->data.desc) {
1402                         n = n->rb_right;
1403                 } else if (need_strong_ref && !ref->data.strong) {
1404                         binder_user_error("tried to use weak ref as strong ref\n");
1405                         return NULL;
1406                 } else {
1407                         return ref;
1408                 }
1409         }
1410         return NULL;
1411 }
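
/*
 * Illustrative lookup sketch (hypothetical caller): per the _olocked
 * suffix, proc->outer_lock must be held across the lookup and any use
 * of the returned ref.
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, desc, true);
 *	if (ref)
 *		... use ref ...
 *	binder_proc_unlock(proc);
 */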
1412
1413 /**
1414  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1415  * @proc:       binder_proc that owns the ref
1416  * @node:       binder_node of target
1417  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1418  *
1419  * Look up the ref for the given node and return it if it exists
1420  *
1421  * If it doesn't exist and the caller provides a newly allocated
1422  * ref, initialize the fields of the newly allocated ref and insert
1423  * into the given proc rb_trees and node refs list.
1424  *
1425  * Return:      the ref for node. It is possible that another thread
1426  *              allocated/initialized the ref first in which case the
1427  *              returned ref would be different than the passed-in
1428  *              new_ref. new_ref must be kfree'd by the caller in
1429  *              this case.
1430  */
1431 static struct binder_ref *binder_get_ref_for_node_olocked(
1432                                         struct binder_proc *proc,
1433                                         struct binder_node *node,
1434                                         struct binder_ref *new_ref)
1435 {
1436         struct binder_context *context = proc->context;
1437         struct rb_node **p = &proc->refs_by_node.rb_node;
1438         struct rb_node *parent = NULL;
1439         struct binder_ref *ref;
1440         struct rb_node *n;
1441
1442         while (*p) {
1443                 parent = *p;
1444                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1445
1446                 if (node < ref->node)
1447                         p = &(*p)->rb_left;
1448                 else if (node > ref->node)
1449                         p = &(*p)->rb_right;
1450                 else
1451                         return ref;
1452         }
1453         if (!new_ref)
1454                 return NULL;
1455
1456         binder_stats_created(BINDER_STAT_REF);
1457         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1458         new_ref->proc = proc;
1459         new_ref->node = node;
1460         rb_link_node(&new_ref->rb_node_node, parent, p);
1461         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1462
1463         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1464         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1465                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1466                 if (ref->data.desc > new_ref->data.desc)
1467                         break;
1468                 new_ref->data.desc = ref->data.desc + 1;
1469         }
1470
1471         p = &proc->refs_by_desc.rb_node;
1472         while (*p) {
1473                 parent = *p;
1474                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1475
1476                 if (new_ref->data.desc < ref->data.desc)
1477                         p = &(*p)->rb_left;
1478                 else if (new_ref->data.desc > ref->data.desc)
1479                         p = &(*p)->rb_right;
1480                 else
1481                         BUG();
1482         }
1483         rb_link_node(&new_ref->rb_node_desc, parent, p);
1484         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1485
1486         binder_node_lock(node);
1487         hlist_add_head(&new_ref->node_entry, &node->refs);
1488
1489         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1490                      "%d new ref %d desc %d for node %d\n",
1491                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1492                       node->debug_id);
1493         binder_node_unlock(node);
1494         return new_ref;
1495 }
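
/*
 * Worked example of the desc assignment above (descs hypothetical):
 * descriptors are handed out lowest-first, with desc 0 reserved for
 * the ref to the context manager node. If a proc already holds refs
 * with descs {0, 1, 2, 4}, a new ref starts at desc 1 and is bumped
 * past each taken desc in rb order:
 *
 *      desc = 1; sees 0 -> desc = 1; sees 1 -> desc = 2;
 *      sees 2 -> desc = 3; sees 4 (> 3) -> stop. New desc is 3.
 */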
1496
1497 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498 {
1499         bool delete_node = false;
1500
1501         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502                      "%d delete ref %d desc %d for node %d\n",
1503                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504                       ref->node->debug_id);
1505
1506         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508
1509         binder_node_inner_lock(ref->node);
1510         if (ref->data.strong)
1511                 binder_dec_node_nilocked(ref->node, 1, 1);
1512
1513         hlist_del(&ref->node_entry);
1514         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515         binder_node_inner_unlock(ref->node);
1516         /*
1517          * Clear ref->node unless we want the caller to free the node
1518          */
1519         if (!delete_node) {
1520                 /*
1521                  * The caller uses ref->node to determine
1522                  * whether the node needs to be freed. Clear
1523                  * it since the node is still alive.
1524                  */
1525                 ref->node = NULL;
1526         }
1527
1528         if (ref->death) {
1529                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530                              "%d delete ref %d desc %d has death notification\n",
1531                               ref->proc->pid, ref->data.debug_id,
1532                               ref->data.desc);
1533                 binder_dequeue_work(ref->proc, &ref->death->work);
1534                 binder_stats_deleted(BINDER_STAT_DEATH);
1535         }
1536         binder_stats_deleted(BINDER_STAT_REF);
1537 }
1538
1539 /**
1540  * binder_inc_ref_olocked() - increment the ref for given handle
1541  * @ref:         ref to be incremented
1542  * @strong:      if true, strong increment, else weak
1543  * @target_list: list to queue node work on
1544  *
1545  * Increment the ref. @ref->proc->outer_lock must be held on entry
1546  *
1547  * Return: 0 if successful, else errno
1548  */
1549 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1550                                   struct list_head *target_list)
1551 {
1552         int ret;
1553
1554         if (strong) {
1555                 if (ref->data.strong == 0) {
1556                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1557                         if (ret)
1558                                 return ret;
1559                 }
1560                 ref->data.strong++;
1561         } else {
1562                 if (ref->data.weak == 0) {
1563                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1564                         if (ret)
1565                                 return ret;
1566                 }
1567                 ref->data.weak++;
1568         }
1569         return 0;
1570 }
1571
1572 /**
1573  * binder_dec_ref_olocked() - dec the ref for given handle
1574  * @ref:        ref to be decremented
1575  * @strong:     if true, strong decrement, else weak
1576  *
1577  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1578  *
1579  * Return: true if ref is cleaned up and ready to be freed
1580  */
1581 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1582 {
1583         if (strong) {
1584                 if (ref->data.strong == 0) {
1585                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1586                                           ref->proc->pid, ref->data.debug_id,
1587                                           ref->data.desc, ref->data.strong,
1588                                           ref->data.weak);
1589                         return false;
1590                 }
1591                 ref->data.strong--;
1592                 if (ref->data.strong == 0)
1593                         binder_dec_node(ref->node, strong, 1);
1594         } else {
1595                 if (ref->data.weak == 0) {
1596                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1597                                           ref->proc->pid, ref->data.debug_id,
1598                                           ref->data.desc, ref->data.strong,
1599                                           ref->data.weak);
1600                         return false;
1601                 }
1602                 ref->data.weak--;
1603         }
1604         if (ref->data.strong == 0 && ref->data.weak == 0) {
1605                 binder_cleanup_ref_olocked(ref);
1606                 return true;
1607         }
1608         return false;
1609 }
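
/*
 * Illustrative sketch, not driver code: binder_dec_ref_olocked() runs
 * under the proc lock, but the actual kfree of a dead ref must happen
 * after the lock is dropped (binder_update_ref_for_handle() below
 * follows this pattern):
 *
 *      binder_proc_lock(proc);
 *      delete_ref = binder_dec_ref_olocked(ref, strong);
 *      binder_proc_unlock(proc);
 *      if (delete_ref)
 *              binder_free_ref(ref);
 */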
1610
1611 /**
1612  * binder_get_node_from_ref() - get the node from the given proc/desc
1613  * @proc:       proc containing the ref
1614  * @desc:       the handle associated with the ref
1615  * @need_strong_ref: if true, only return node if ref is strong
1616  * @rdata:      the id/refcount data for the ref
1617  *
1618  * Given a proc and ref handle, return the associated binder_node
1619  *
1620  * Return: the binder_node, or NULL if not found or the ref is weak when a strong ref is required
1621  */
1622 static struct binder_node *binder_get_node_from_ref(
1623                 struct binder_proc *proc,
1624                 u32 desc, bool need_strong_ref,
1625                 struct binder_ref_data *rdata)
1626 {
1627         struct binder_node *node;
1628         struct binder_ref *ref;
1629
1630         binder_proc_lock(proc);
1631         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1632         if (!ref)
1633                 goto err_no_ref;
1634         node = ref->node;
1635         /*
1636          * Take an implicit reference on the node to ensure
1637          * it stays alive until the call to binder_put_node()
1638          */
1639         binder_inc_node_tmpref(node);
1640         if (rdata)
1641                 *rdata = ref->data;
1642         binder_proc_unlock(proc);
1643
1644         return node;
1645
1646 err_no_ref:
1647         binder_proc_unlock(proc);
1648         return NULL;
1649 }
1650
1651 /**
1652  * binder_free_ref() - free the binder_ref
1653  * @ref:        ref to free
1654  *
1655  * Free the binder_ref. Free the binder_node indicated by ref->node
1656  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1657  */
1658 static void binder_free_ref(struct binder_ref *ref)
1659 {
1660         if (ref->node)
1661                 binder_free_node(ref->node);
1662         kfree(ref->death);
1663         kfree(ref);
1664 }
1665
1666 /**
1667  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1668  * @proc:       proc containing the ref
1669  * @desc:       the handle associated with the ref
1670  * @increment:  true=inc reference, false=dec reference
1671  * @strong:     true=strong reference, false=weak reference
1672  * @rdata:      the id/refcount data for the ref
1673  *
1674  * Given a proc and ref handle, increment or decrement the ref
1675  * according to "increment" arg.
1676  *
1677  * Return: 0 if successful, else errno
1678  */
1679 static int binder_update_ref_for_handle(struct binder_proc *proc,
1680                 uint32_t desc, bool increment, bool strong,
1681                 struct binder_ref_data *rdata)
1682 {
1683         int ret = 0;
1684         struct binder_ref *ref;
1685         bool delete_ref = false;
1686
1687         binder_proc_lock(proc);
1688         ref = binder_get_ref_olocked(proc, desc, strong);
1689         if (!ref) {
1690                 ret = -EINVAL;
1691                 goto err_no_ref;
1692         }
1693         if (increment)
1694                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1695         else
1696                 delete_ref = binder_dec_ref_olocked(ref, strong);
1697
1698         if (rdata)
1699                 *rdata = ref->data;
1700         binder_proc_unlock(proc);
1701
1702         if (delete_ref)
1703                 binder_free_ref(ref);
1704         return ret;
1705
1706 err_no_ref:
1707         binder_proc_unlock(proc);
1708         return ret;
1709 }
1710
1711 /**
1712  * binder_dec_ref_for_handle() - dec the ref for given handle
1713  * @proc:       proc containing the ref
1714  * @desc:       the handle associated with the ref
1715  * @strong:     true=strong reference, false=weak reference
1716  * @rdata:      the id/refcount data for the ref
1717  *
1718  * Just calls binder_update_ref_for_handle() to decrement the ref.
1719  *
1720  * Return: 0 if successful, else errno
1721  */
1722 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1723                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1724 {
1725         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1726 }
1727
1728
1729 /**
1730  * binder_inc_ref_for_node() - increment the ref for given proc/node
1731  * @proc:        proc containing the ref
1732  * @node:        target node
1733  * @strong:      true=strong reference, false=weak reference
1734  * @target_list: worklist to use if node is incremented
1735  * @rdata:       the id/refcount data for the ref
1736  *
1737  * Given a proc and node, increment the ref. Create the ref if it
1738  * doesn't already exist
1739  *
1740  * Return: 0 if successful, else errno
1741  */
1742 static int binder_inc_ref_for_node(struct binder_proc *proc,
1743                         struct binder_node *node,
1744                         bool strong,
1745                         struct list_head *target_list,
1746                         struct binder_ref_data *rdata)
1747 {
1748         struct binder_ref *ref;
1749         struct binder_ref *new_ref = NULL;
1750         int ret = 0;
1751
1752         binder_proc_lock(proc);
1753         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1754         if (!ref) {
1755                 binder_proc_unlock(proc);
1756                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1757                 if (!new_ref)
1758                         return -ENOMEM;
1759                 binder_proc_lock(proc);
1760                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1761         }
1762         ret = binder_inc_ref_olocked(ref, strong, target_list);
1763         *rdata = ref->data;
1764         binder_proc_unlock(proc);
1765         if (new_ref && ref != new_ref)
1766                 /*
1767                  * Another thread created the ref first so
1768                  * free the one we allocated
1769                  */
1770                 kfree(new_ref);
1771         return ret;
1772 }
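
/*
 * Note on binder_inc_ref_for_node() above: kzalloc(GFP_KERNEL) may
 * sleep, so it cannot run under the proc lock. The lock is dropped
 * for the allocation and the lookup is retried; if another thread
 * created the ref in the window, the extra allocation is kfree'd.
 */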
1773
1774 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1775                                            struct binder_transaction *t)
1776 {
1777         BUG_ON(!target_thread);
1778         assert_spin_locked(&target_thread->proc->inner_lock);
1779         BUG_ON(target_thread->transaction_stack != t);
1780         BUG_ON(target_thread->transaction_stack->from != target_thread);
1781         target_thread->transaction_stack =
1782                 target_thread->transaction_stack->from_parent;
1783         t->from = NULL;
1784 }
1785
1786 /**
1787  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1788  * @thread:     thread to decrement
1789  *
1790  * A thread needs to be kept alive while being used to create or
1791  * handle a transaction. binder_get_txn_from() is used to safely
1792  * extract t->from from a binder_transaction and keep the thread
1793  * indicated by t->from from being freed. When done with that
1794  * binder_thread, this function is called to decrement the
1795  * tmp_ref and free the thread if appropriate (the thread has been
1796  * released and no transaction is being processed by the driver)
1797  */
1798 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1799 {
1800         /*
1801          * tmp_ref is atomic so it can change without the inner lock;
1802          * the lock makes the is_dead and zero-count checks consistent
1803          */
1804         binder_inner_proc_lock(thread->proc);
1805         atomic_dec(&thread->tmp_ref);
1806         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1807                 binder_inner_proc_unlock(thread->proc);
1808                 binder_free_thread(thread);
1809                 return;
1810         }
1811         binder_inner_proc_unlock(thread->proc);
1812 }
1813
1814 /**
1815  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1816  * @proc:       proc to decrement
1817  *
1818  * A binder_proc needs to be kept alive while being used to create or
1819  * handle a transaction. proc->tmp_ref is incremented when
1820  * creating a new transaction or when the binder_proc is in use
1821  * by threads that are being released. When done with the binder_proc,
1822  * this function is called to decrement the counter and free the
1823  * proc if appropriate (proc has been released, all threads have
1824  * been released and it is not currently in use to process a transaction).
1825  */
1826 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1827 {
1828         binder_inner_proc_lock(proc);
1829         proc->tmp_ref--;
1830         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1831                         !proc->tmp_ref) {
1832                 binder_inner_proc_unlock(proc);
1833                 binder_free_proc(proc);
1834                 return;
1835         }
1836         binder_inner_proc_unlock(proc);
1837 }
1838
1839 /**
1840  * binder_get_txn_from() - safely extract the "from" thread in transaction
1841  * @t:  binder transaction for t->from
1842  *
1843  * Atomically return the "from" thread and increment the tmp_ref
1844  * count for the thread to ensure it stays alive until
1845  * binder_thread_dec_tmpref() is called.
1846  *
1847  * Return: the value of t->from
1848  */
1849 static struct binder_thread *binder_get_txn_from(
1850                 struct binder_transaction *t)
1851 {
1852         struct binder_thread *from;
1853
1854         spin_lock(&t->lock);
1855         from = t->from;
1856         if (from)
1857                 atomic_inc(&from->tmp_ref);
1858         spin_unlock(&t->lock);
1859         return from;
1860 }
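
/*
 * Illustrative sketch, not driver code: a binder_get_txn_from() call
 * is balanced by binder_thread_dec_tmpref() once the caller is done:
 *
 *      struct binder_thread *from = binder_get_txn_from(t);
 *
 *      if (from) {
 *              ... from cannot be freed while the tmpref is held ...
 *              binder_thread_dec_tmpref(from);
 *      }
 */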
1861
1862 /**
1863  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1864  * @t:  binder transaction for t->from
1865  *
1866  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1867  * to guarantee that the thread cannot be released while operating on it.
1868  * The caller must call binder_inner_proc_unlock() to release the inner lock
1869  * as well as call binder_thread_dec_tmpref() to release the reference.
1870  *
1871  * Return: the value of t->from
1872  */
1873 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1874                 struct binder_transaction *t)
1875 {
1876         struct binder_thread *from;
1877
1878         from = binder_get_txn_from(t);
1879         if (!from)
1880                 return NULL;
1881         binder_inner_proc_lock(from->proc);
1882         if (t->from) {
1883                 BUG_ON(from != t->from);
1884                 return from;
1885         }
1886         binder_inner_proc_unlock(from->proc);
1887         binder_thread_dec_tmpref(from);
1888         return NULL;
1889 }
1890
1891 static void binder_free_transaction(struct binder_transaction *t)
1892 {
1893         if (t->buffer)
1894                 t->buffer->transaction = NULL;
1895         kfree(t);
1896         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1897 }
1898
1899 static void binder_send_failed_reply(struct binder_transaction *t,
1900                                      uint32_t error_code)
1901 {
1902         struct binder_thread *target_thread;
1903         struct binder_transaction *next;
1904
1905         BUG_ON(t->flags & TF_ONE_WAY);
1906         while (1) {
1907                 target_thread = binder_get_txn_from_and_acq_inner(t);
1908                 if (target_thread) {
1909                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1910                                      "send failed reply for transaction %d to %d:%d\n",
1911                                       t->debug_id,
1912                                       target_thread->proc->pid,
1913                                       target_thread->pid);
1914
1915                         binder_pop_transaction_ilocked(target_thread, t);
1916                         if (target_thread->reply_error.cmd == BR_OK) {
1917                                 target_thread->reply_error.cmd = error_code;
1918                                 binder_enqueue_work_ilocked(
1919                                         &target_thread->reply_error.work,
1920                                         &target_thread->todo);
1921                                 wake_up_interruptible(&target_thread->wait);
1922                         } else {
1923                                 WARN(1, "Unexpected reply error: %u\n",
1924                                                 target_thread->reply_error.cmd);
1925                         }
1926                         binder_inner_proc_unlock(target_thread->proc);
1927                         binder_thread_dec_tmpref(target_thread);
1928                         binder_free_transaction(t);
1929                         return;
1930                 }
1931                 next = t->from_parent;
1932
1933                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1934                              "send failed reply for transaction %d, target dead\n",
1935                              t->debug_id);
1936
1937                 binder_free_transaction(t);
1938                 if (next == NULL) {
1939                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1940                                      "reply failed, no target thread at root\n");
1941                         return;
1942                 }
1943                 t = next;
1944                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1945                              "reply failed, no target thread -- retry %d\n",
1946                               t->debug_id);
1947         }
1948 }
1949
1950 /**
1951  * binder_validate_object() - checks for a valid metadata object in a buffer.
1952  * @buffer:     binder_buffer that we're parsing.
1953  * @offset:     offset in the buffer at which to validate an object.
1954  *
1955  * Return:      If there's a valid metadata object at @offset in @buffer, the
1956  *              size of that object. Otherwise, it returns zero.
1957  */
1958 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1959 {
1960         /* Check if we can read a header first */
1961         struct binder_object_header *hdr;
1962         size_t object_size = 0;
1963
1964         if (offset > buffer->data_size - sizeof(*hdr) ||
1965             buffer->data_size < sizeof(*hdr) ||
1966             !IS_ALIGNED(offset, sizeof(u32)))
1967                 return 0;
1968
1969         /* Ok, now see if we can read a complete object. */
1970         hdr = (struct binder_object_header *)(buffer->data + offset);
1971         switch (hdr->type) {
1972         case BINDER_TYPE_BINDER:
1973         case BINDER_TYPE_WEAK_BINDER:
1974         case BINDER_TYPE_HANDLE:
1975         case BINDER_TYPE_WEAK_HANDLE:
1976                 object_size = sizeof(struct flat_binder_object);
1977                 break;
1978         case BINDER_TYPE_FD:
1979                 object_size = sizeof(struct binder_fd_object);
1980                 break;
1981         case BINDER_TYPE_PTR:
1982                 object_size = sizeof(struct binder_buffer_object);
1983                 break;
1984         case BINDER_TYPE_FDA:
1985                 object_size = sizeof(struct binder_fd_array_object);
1986                 break;
1987         default:
1988                 return 0;
1989         }
1990         if (offset <= buffer->data_size - object_size &&
1991             buffer->data_size >= object_size)
1992                 return object_size;
1993         else
1994                 return 0;
1995 }
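
/*
 * Worked example of the unsigned bounds checks above: with
 * data_size == 2 and sizeof(*hdr) == 4, "data_size - sizeof(*hdr)"
 * wraps to a huge value, so "offset > ..." alone would accept any
 * offset; the explicit "data_size < sizeof(*hdr)" test catches the
 * wrap. The object_size check at the end pairs its tests the same way.
 */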
1996
1997 /**
1998  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1999  * @b:          binder_buffer containing the object
2000  * @index:      index in offset array at which the binder_buffer_object is
2001  *              located
2002  * @start:      points to the start of the offset array
2003  * @num_valid:  the number of valid offsets in the offset array
2004  *
2005  * Return:      If @index is within the valid range of the offset array
2006  *              described by @start and @num_valid, and if there's a valid
2007  *              binder_buffer_object at the offset found in index @index
2008  *              of the offset array, that object is returned. Otherwise,
2009  *              %NULL is returned.
2010  *              Note that the offset found in index @index itself is not
2011  *              verified; this function assumes that @num_valid elements
2012  *              from @start were previously verified to have valid offsets.
2013  */
2014 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2015                                                         binder_size_t index,
2016                                                         binder_size_t *start,
2017                                                         binder_size_t num_valid)
2018 {
2019         struct binder_buffer_object *buffer_obj;
2020         binder_size_t *offp;
2021
2022         if (index >= num_valid)
2023                 return NULL;
2024
2025         offp = start + index;
2026         buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2027         if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2028                 return NULL;
2029
2030         return buffer_obj;
2031 }
2032
2033 /**
2034  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2035  * @b:                  transaction buffer
2036  * @objects_start:      start of objects buffer
2037  * @buffer:             binder_buffer_object in which the fixup takes place
2038  * @fixup_offset:       start offset in @buffer to fix up
2039  * @last_obj:           last binder_buffer_object that we fixed up in
2040  * @last_min_offset:    minimum fixup offset in @last_obj
2041  *
2042  * Return:              %true if a fixup in buffer @buffer at offset
2043  *                      @fixup_offset is allowed.
2044  *
2045  * For safety reasons, we only allow fixups inside a buffer to happen
2046  * at increasing offsets; additionally, we only allow fixup on the last
2047  * buffer object that was verified, or one of its parents.
2048  *
2049  * Example of what is allowed:
2050  *
2051  * A
2052  *   B (parent = A, offset = 0)
2053  *   C (parent = A, offset = 16)
2054  *     D (parent = C, offset = 0)
2055  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2056  *
2057  * Examples of what is not allowed:
2058  *
2059  * Decreasing offsets within the same parent:
2060  * A
2061  *   C (parent = A, offset = 16)
2062  *   B (parent = A, offset = 0) // decreasing offset within A
2063  *
2064  * Referring to a parent that wasn't the last object or any of its parents:
2065  * A
2066  *   B (parent = A, offset = 0)
2067  *   C (parent = A, offset = 0)
2068  *   C (parent = A, offset = 16)
2069  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2070  */
2071 static bool binder_validate_fixup(struct binder_buffer *b,
2072                                   binder_size_t *objects_start,
2073                                   struct binder_buffer_object *buffer,
2074                                   binder_size_t fixup_offset,
2075                                   struct binder_buffer_object *last_obj,
2076                                   binder_size_t last_min_offset)
2077 {
2078         if (!last_obj) {
2079                 /* No previously verified object to fix up in */
2080                 return false;
2081         }
2082
2083         while (last_obj != buffer) {
2084                 /*
2085                  * Safe to retrieve the parent of last_obj, since it
2086                  * was already previously verified by the driver.
2087                  */
2088                 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2089                         return false;
2090                 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2091                 last_obj = (struct binder_buffer_object *)
2092                         (b->data + *(objects_start + last_obj->parent));
2093         }
2094         return (fixup_offset >= last_min_offset);
2095 }
2096
2097 static void binder_transaction_buffer_release(struct binder_proc *proc,
2098                                               struct binder_buffer *buffer,
2099                                               binder_size_t *failed_at)
2100 {
2101         binder_size_t *offp, *off_start, *off_end;
2102         int debug_id = buffer->debug_id;
2103
2104         binder_debug(BINDER_DEBUG_TRANSACTION,
2105                      "%d buffer release %d, size %zd-%zd, failed at %p\n",
2106                      proc->pid, buffer->debug_id,
2107                      buffer->data_size, buffer->offsets_size, failed_at);
2108
2109         if (buffer->target_node)
2110                 binder_dec_node(buffer->target_node, 1, 0);
2111
2112         off_start = (binder_size_t *)(buffer->data +
2113                                       ALIGN(buffer->data_size, sizeof(void *)));
2114         if (failed_at)
2115                 off_end = failed_at;
2116         else
2117                 off_end = (void *)off_start + buffer->offsets_size;
2118         for (offp = off_start; offp < off_end; offp++) {
2119                 struct binder_object_header *hdr;
2120                 size_t object_size = binder_validate_object(buffer, *offp);
2121
2122                 if (object_size == 0) {
2123                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2124                                debug_id, (u64)*offp, buffer->data_size);
2125                         continue;
2126                 }
2127                 hdr = (struct binder_object_header *)(buffer->data + *offp);
2128                 switch (hdr->type) {
2129                 case BINDER_TYPE_BINDER:
2130                 case BINDER_TYPE_WEAK_BINDER: {
2131                         struct flat_binder_object *fp;
2132                         struct binder_node *node;
2133
2134                         fp = to_flat_binder_object(hdr);
2135                         node = binder_get_node(proc, fp->binder);
2136                         if (node == NULL) {
2137                                 pr_err("transaction release %d bad node %016llx\n",
2138                                        debug_id, (u64)fp->binder);
2139                                 break;
2140                         }
2141                         binder_debug(BINDER_DEBUG_TRANSACTION,
2142                                      "        node %d u%016llx\n",
2143                                      node->debug_id, (u64)node->ptr);
2144                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2145                                         0);
2146                         binder_put_node(node);
2147                 } break;
2148                 case BINDER_TYPE_HANDLE:
2149                 case BINDER_TYPE_WEAK_HANDLE: {
2150                         struct flat_binder_object *fp;
2151                         struct binder_ref_data rdata;
2152                         int ret;
2153
2154                         fp = to_flat_binder_object(hdr);
2155                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2156                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2157
2158                         if (ret) {
2159                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2160                                  debug_id, fp->handle, ret);
2161                                 break;
2162                         }
2163                         binder_debug(BINDER_DEBUG_TRANSACTION,
2164                                      "        ref %d desc %d\n",
2165                                      rdata.debug_id, rdata.desc);
2166                 } break;
2167
2168                 case BINDER_TYPE_FD: {
2169                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2170
2171                         binder_debug(BINDER_DEBUG_TRANSACTION,
2172                                      "        fd %d\n", fp->fd);
2173                         if (failed_at)
2174                                 task_close_fd(proc, fp->fd);
2175                 } break;
2176                 case BINDER_TYPE_PTR:
2177                         /*
2178                          * Nothing to do here, this will get cleaned up when the
2179                          * transaction buffer gets freed
2180                          */
2181                         break;
2182                 case BINDER_TYPE_FDA: {
2183                         struct binder_fd_array_object *fda;
2184                         struct binder_buffer_object *parent;
2185                         uintptr_t parent_buffer;
2186                         u32 *fd_array;
2187                         size_t fd_index;
2188                         binder_size_t fd_buf_size;
2189
2190                         fda = to_binder_fd_array_object(hdr);
2191                         parent = binder_validate_ptr(buffer, fda->parent,
2192                                                      off_start,
2193                                                      offp - off_start);
2194                         if (!parent) {
2195                                 pr_err("transaction release %d bad parent offset",
2196                                        debug_id);
2197                                 continue;
2198                         }
2199                         /*
2200                          * Since the parent was already fixed up, convert it
2201                          * back to kernel address space to access it
2202                          */
2203                         parent_buffer = parent->buffer -
2204                                 binder_alloc_get_user_buffer_offset(
2205                                                 &proc->alloc);
2206
2207                         fd_buf_size = sizeof(u32) * fda->num_fds;
2208                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2209                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2210                                        debug_id, (u64)fda->num_fds);
2211                                 continue;
2212                         }
2213                         if (fd_buf_size > parent->length ||
2214                             fda->parent_offset > parent->length - fd_buf_size) {
2215                                 /* No space for all file descriptors here. */
2216                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2217                                        debug_id, (u64)fda->num_fds);
2218                                 continue;
2219                         }
2220                         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2221                         for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2222                                 task_close_fd(proc, fd_array[fd_index]);
2223                 } break;
2224                 default:
2225                         pr_err("transaction release %d bad object type %x\n",
2226                                 debug_id, hdr->type);
2227                         break;
2228                 }
2229         }
2230 }
2231
2232 static int binder_translate_binder(struct flat_binder_object *fp,
2233                                    struct binder_transaction *t,
2234                                    struct binder_thread *thread)
2235 {
2236         struct binder_node *node;
2237         struct binder_proc *proc = thread->proc;
2238         struct binder_proc *target_proc = t->to_proc;
2239         struct binder_ref_data rdata;
2240         int ret = 0;
2241
2242         node = binder_get_node(proc, fp->binder);
2243         if (!node) {
2244                 node = binder_new_node(proc, fp);
2245                 if (!node)
2246                         return -ENOMEM;
2247         }
2248         if (fp->cookie != node->cookie) {
2249                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2250                                   proc->pid, thread->pid, (u64)fp->binder,
2251                                   node->debug_id, (u64)fp->cookie,
2252                                   (u64)node->cookie);
2253                 ret = -EINVAL;
2254                 goto done;
2255         }
2256         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2257                 ret = -EPERM;
2258                 goto done;
2259         }
2260
2261         ret = binder_inc_ref_for_node(target_proc, node,
2262                         fp->hdr.type == BINDER_TYPE_BINDER,
2263                         &thread->todo, &rdata);
2264         if (ret)
2265                 goto done;
2266
2267         if (fp->hdr.type == BINDER_TYPE_BINDER)
2268                 fp->hdr.type = BINDER_TYPE_HANDLE;
2269         else
2270                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2271         fp->binder = 0;
2272         fp->handle = rdata.desc;
2273         fp->cookie = 0;
2274
2275         trace_binder_transaction_node_to_ref(t, node, &rdata);
2276         binder_debug(BINDER_DEBUG_TRANSACTION,
2277                      "        node %d u%016llx -> ref %d desc %d\n",
2278                      node->debug_id, (u64)node->ptr,
2279                      rdata.debug_id, rdata.desc);
2280 done:
2281         binder_put_node(node);
2282         return ret;
2283 }
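
/*
 * Illustrative example for the translation above (values
 * hypothetical): a sender passing one of its own nodes as
 * BINDER_TYPE_BINDER { .binder = ptr, .cookie = cookie } has the
 * object rewritten in place so the target instead sees
 * BINDER_TYPE_HANDLE { .handle = desc, .binder = 0, .cookie = 0 },
 * where desc identifies the target proc's ref to that node.
 */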
2284
2285 static int binder_translate_handle(struct flat_binder_object *fp,
2286                                    struct binder_transaction *t,
2287                                    struct binder_thread *thread)
2288 {
2289         struct binder_proc *proc = thread->proc;
2290         struct binder_proc *target_proc = t->to_proc;
2291         struct binder_node *node;
2292         struct binder_ref_data src_rdata;
2293         int ret = 0;
2294
2295         node = binder_get_node_from_ref(proc, fp->handle,
2296                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2297         if (!node) {
2298                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2299                                   proc->pid, thread->pid, fp->handle);
2300                 return -EINVAL;
2301         }
2302         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2303                 ret = -EPERM;
2304                 goto done;
2305         }
2306
2307         binder_node_lock(node);
2308         if (node->proc == target_proc) {
2309                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2310                         fp->hdr.type = BINDER_TYPE_BINDER;
2311                 else
2312                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2313                 fp->binder = node->ptr;
2314                 fp->cookie = node->cookie;
2315                 if (node->proc)
2316                         binder_inner_proc_lock(node->proc);
2317                 binder_inc_node_nilocked(node,
2318                                          fp->hdr.type == BINDER_TYPE_BINDER,
2319                                          0, NULL);
2320                 if (node->proc)
2321                         binder_inner_proc_unlock(node->proc);
2322                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2323                 binder_debug(BINDER_DEBUG_TRANSACTION,
2324                              "        ref %d desc %d -> node %d u%016llx\n",
2325                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2326                              (u64)node->ptr);
2327                 binder_node_unlock(node);
2328         } else {
2329                 struct binder_ref_data dest_rdata;
2330
2331                 binder_node_unlock(node);
2332                 ret = binder_inc_ref_for_node(target_proc, node,
2333                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2334                                 NULL, &dest_rdata);
2335                 if (ret)
2336                         goto done;
2337
2338                 fp->binder = 0;
2339                 fp->handle = dest_rdata.desc;
2340                 fp->cookie = 0;
2341                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2342                                                     &dest_rdata);
2343                 binder_debug(BINDER_DEBUG_TRANSACTION,
2344                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2345                              src_rdata.debug_id, src_rdata.desc,
2346                              dest_rdata.debug_id, dest_rdata.desc,
2347                              node->debug_id);
2348         }
2349 done:
2350         binder_put_node(node);
2351         return ret;
2352 }
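
/*
 * Summary of the two cases above: a handle travelling back to the
 * proc that owns the node collapses into BINDER_TYPE_BINDER carrying
 * the owner's original binder/cookie values, while a handle sent to
 * any other proc stays a handle but is re-expressed in the target's
 * own descriptor space.
 */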
2353
2354 static int binder_translate_fd(int fd,
2355                                struct binder_transaction *t,
2356                                struct binder_thread *thread,
2357                                struct binder_transaction *in_reply_to)
2358 {
2359         struct binder_proc *proc = thread->proc;
2360         struct binder_proc *target_proc = t->to_proc;
2361         int target_fd;
2362         struct file *file;
2363         int ret;
2364         bool target_allows_fd;
2365
2366         if (in_reply_to)
2367                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2368         else
2369                 target_allows_fd = t->buffer->target_node->accept_fds;
2370         if (!target_allows_fd) {
2371                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2372                                   proc->pid, thread->pid,
2373                                   in_reply_to ? "reply" : "transaction",
2374                                   fd);
2375                 ret = -EPERM;
2376                 goto err_fd_not_accepted;
2377         }
2378
2379         file = fget(fd);
2380         if (!file) {
2381                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2382                                   proc->pid, thread->pid, fd);
2383                 ret = -EBADF;
2384                 goto err_fget;
2385         }
2386         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2387         if (ret < 0) {
2388                 ret = -EPERM;
2389                 goto err_security;
2390         }
2391
2392         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2393         if (target_fd < 0) {
2394                 ret = -ENOMEM;
2395                 goto err_get_unused_fd;
2396         }
2397         task_fd_install(target_proc, target_fd, file);
2398         trace_binder_transaction_fd(t, fd, target_fd);
2399         binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
2400                      fd, target_fd);
2401
2402         return target_fd;
2403
2404 err_get_unused_fd:
2405 err_security:
2406         fput(file);
2407 err_fget:
2408 err_fd_not_accepted:
2409         return ret;
2410 }
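
/*
 * Illustrative example for the fd translation above (fd numbers
 * hypothetical): sender fd 17 is resolved to a struct file, a free
 * descriptor (say 5) is reserved in the target task, and the file is
 * installed there, so the receiver finds 5 in the buffer where the
 * sender wrote 17.
 */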
2411
2412 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2413                                      struct binder_buffer_object *parent,
2414                                      struct binder_transaction *t,
2415                                      struct binder_thread *thread,
2416                                      struct binder_transaction *in_reply_to)
2417 {
2418         binder_size_t fdi, fd_buf_size, num_installed_fds;
2419         int target_fd;
2420         uintptr_t parent_buffer;
2421         u32 *fd_array;
2422         struct binder_proc *proc = thread->proc;
2423         struct binder_proc *target_proc = t->to_proc;
2424
2425         fd_buf_size = sizeof(u32) * fda->num_fds;
2426         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2427                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2428                                   proc->pid, thread->pid, (u64)fda->num_fds);
2429                 return -EINVAL;
2430         }
2431         if (fd_buf_size > parent->length ||
2432             fda->parent_offset > parent->length - fd_buf_size) {
2433                 /* No space for all file descriptors here. */
2434                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2435                                   proc->pid, thread->pid, (u64)fda->num_fds);
2436                 return -EINVAL;
2437         }
2438         /*
2439          * Since the parent was already fixed up, convert it
2440          * back to the kernel address space to access it
2441          */
2442         parent_buffer = parent->buffer -
2443                 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2444         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2445         if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2446                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2447                                   proc->pid, thread->pid);
2448                 return -EINVAL;
2449         }
2450         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2451                 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2452                                                 in_reply_to);
2453                 if (target_fd < 0)
2454                         goto err_translate_fd_failed;
2455                 fd_array[fdi] = target_fd;
2456         }
2457         return 0;
2458
2459 err_translate_fd_failed:
2460         /*
2461          * Failed to allocate fd or security error, free fds
2462          * installed so far.
2463          */
2464         num_installed_fds = fdi;
2465         for (fdi = 0; fdi < num_installed_fds; fdi++)
2466                 task_close_fd(target_proc, fd_array[fdi]);
2467         return target_fd;
2468 }
2469
2470 static int binder_fixup_parent(struct binder_transaction *t,
2471                                struct binder_thread *thread,
2472                                struct binder_buffer_object *bp,
2473                                binder_size_t *off_start,
2474                                binder_size_t num_valid,
2475                                struct binder_buffer_object *last_fixup_obj,
2476                                binder_size_t last_fixup_min_off)
2477 {
2478         struct binder_buffer_object *parent;
2479         u8 *parent_buffer;
2480         struct binder_buffer *b = t->buffer;
2481         struct binder_proc *proc = thread->proc;
2482         struct binder_proc *target_proc = t->to_proc;
2483
2484         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2485                 return 0;
2486
2487         parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2488         if (!parent) {
2489                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2490                                   proc->pid, thread->pid);
2491                 return -EINVAL;
2492         }
2493
2494         if (!binder_validate_fixup(b, off_start,
2495                                    parent, bp->parent_offset,
2496                                    last_fixup_obj,
2497                                    last_fixup_min_off)) {
2498                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2499                                   proc->pid, thread->pid);
2500                 return -EINVAL;
2501         }
2502
2503         if (parent->length < sizeof(binder_uintptr_t) ||
2504             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2505                 /* No space for a pointer here! */
2506                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2507                                   proc->pid, thread->pid);
2508                 return -EINVAL;
2509         }
2510         parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2511                         binder_alloc_get_user_buffer_offset(
2512                                 &target_proc->alloc));
2513         *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2514
2515         return 0;
2516 }
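
/*
 * Worked sketch of the final store above (addresses hypothetical): if
 * the parent buffer lands at target user address 0x7f00 and
 * bp->parent_offset is 8, the pointer slot at target user address
 * 0x7f08 is written (through the kernel mapping) with bp->buffer, the
 * already fixed-up target-side address of the child buffer.
 */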
2517
2518 /**
2519  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2520  * @t:          transaction to send
2521  * @proc:       process to send the transaction to
2522  * @thread:     thread in @proc to send the transaction to (may be NULL)
2523  *
2524  * This function queues a transaction to the specified process. It will try
2525  * to find a thread in the target process to handle the transaction and
2526  * wake it up. If no thread is found, the work is queued to the proc
2527  * waitqueue.
2528  *
2529  * If the @thread parameter is not NULL, the transaction is always queued
2530  * to the waitlist of that specific thread.
2531  *
2532  * Return:      true if the transaction was successfully queued
2533  *              false if the target process or thread is dead
2534  */
2535 static bool binder_proc_transaction(struct binder_transaction *t,
2536                                     struct binder_proc *proc,
2537                                     struct binder_thread *thread)
2538 {
2539         struct list_head *target_list = NULL;
2540         struct binder_node *node = t->buffer->target_node;
2541         bool oneway = !!(t->flags & TF_ONE_WAY);
2542         bool wakeup = true;
2543
2544         BUG_ON(!node);
2545         binder_node_lock(node);
2546         if (oneway) {
2547                 BUG_ON(thread);
2548                 if (node->has_async_transaction) {
2549                         target_list = &node->async_todo;
2550                         wakeup = false;
2551                 } else {
2552                         node->has_async_transaction = 1;
2553                 }
2554         }
2555
2556         binder_inner_proc_lock(proc);
2557
2558         if (proc->is_dead || (thread && thread->is_dead)) {
2559                 binder_inner_proc_unlock(proc);
2560                 binder_node_unlock(node);
2561                 return false;
2562         }
2563
2564         if (!thread && !target_list)
2565                 thread = binder_select_thread_ilocked(proc);
2566
2567         if (thread)
2568                 target_list = &thread->todo;
2569         else if (!target_list)
2570                 target_list = &proc->todo;
2571         else
2572                 BUG_ON(target_list != &node->async_todo);
2573
2574         binder_enqueue_work_ilocked(&t->work, target_list);
2575
2576         if (wakeup)
2577                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2578
2579         binder_inner_proc_unlock(proc);
2580         binder_node_unlock(node);
2581
2582         return true;
2583 }
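
/*
 * Target-list selection above, summarized: a oneway transaction
 * behind an outstanding async transaction is parked on
 * node->async_todo with no wakeup; otherwise an explicit or selected
 * thread takes it on thread->todo, with proc->todo as the fallback
 * when no thread is available.
 */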
2584
2585 static void binder_transaction(struct binder_proc *proc,
2586                                struct binder_thread *thread,
2587                                struct binder_transaction_data *tr, int reply,
2588                                binder_size_t extra_buffers_size)
2589 {
2590         int ret;
2591         struct binder_transaction *t;
2592         struct binder_work *tcomplete;
2593         binder_size_t *offp, *off_end, *off_start;
2594         binder_size_t off_min;
2595         u8 *sg_bufp, *sg_buf_end;
2596         struct binder_proc *target_proc = NULL;
2597         struct binder_thread *target_thread = NULL;
2598         struct binder_node *target_node = NULL;
2599         struct binder_transaction *in_reply_to = NULL;
2600         struct binder_transaction_log_entry *e;
2601         uint32_t return_error = 0;
2602         uint32_t return_error_param = 0;
2603         uint32_t return_error_line = 0;
2604         struct binder_buffer_object *last_fixup_obj = NULL;
2605         binder_size_t last_fixup_min_off = 0;
2606         struct binder_context *context = proc->context;
2607         int t_debug_id = atomic_inc_return(&binder_last_id);
2608
2609         e = binder_transaction_log_add(&binder_transaction_log);
2610         e->debug_id = t_debug_id;
2611         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2612         e->from_proc = proc->pid;
2613         e->from_thread = thread->pid;
2614         e->target_handle = tr->target.handle;
2615         e->data_size = tr->data_size;
2616         e->offsets_size = tr->offsets_size;
2617         e->context_name = proc->context->name;
2618
2619         if (reply) {
2620                 binder_inner_proc_lock(proc);
2621                 in_reply_to = thread->transaction_stack;
2622                 if (in_reply_to == NULL) {
2623                         binder_inner_proc_unlock(proc);
2624                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2625                                           proc->pid, thread->pid);
2626                         return_error = BR_FAILED_REPLY;
2627                         return_error_param = -EPROTO;
2628                         return_error_line = __LINE__;
2629                         goto err_empty_call_stack;
2630                 }
2631                 if (in_reply_to->to_thread != thread) {
2632                         spin_lock(&in_reply_to->lock);
2633                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2634                                 proc->pid, thread->pid, in_reply_to->debug_id,
2635                                 in_reply_to->to_proc ?
2636                                 in_reply_to->to_proc->pid : 0,
2637                                 in_reply_to->to_thread ?
2638                                 in_reply_to->to_thread->pid : 0);
2639                         spin_unlock(&in_reply_to->lock);
2640                         binder_inner_proc_unlock(proc);
2641                         return_error = BR_FAILED_REPLY;
2642                         return_error_param = -EPROTO;
2643                         return_error_line = __LINE__;
2644                         in_reply_to = NULL;
2645                         goto err_bad_call_stack;
2646                 }
2647                 thread->transaction_stack = in_reply_to->to_parent;
2648                 binder_inner_proc_unlock(proc);
2649                 binder_set_nice(in_reply_to->saved_priority);
2650                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2651                 if (target_thread == NULL) {
2652                         return_error = BR_DEAD_REPLY;
2653                         return_error_line = __LINE__;
2654                         goto err_dead_binder;
2655                 }
2656                 if (target_thread->transaction_stack != in_reply_to) {
2657                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2658                                 proc->pid, thread->pid,
2659                                 target_thread->transaction_stack ?
2660                                 target_thread->transaction_stack->debug_id : 0,
2661                                 in_reply_to->debug_id);
2662                         binder_inner_proc_unlock(target_thread->proc);
2663                         return_error = BR_FAILED_REPLY;
2664                         return_error_param = -EPROTO;
2665                         return_error_line = __LINE__;
2666                         in_reply_to = NULL;
2667                         target_thread = NULL;
2668                         goto err_dead_binder;
2669                 }
2670                 target_proc = target_thread->proc;
2671                 target_proc->tmp_ref++;
2672                 binder_inner_proc_unlock(target_thread->proc);
2673         } else {
2674                 if (tr->target.handle) {
2675                         struct binder_ref *ref;
2676
2677                         /*
2678                          * There must already be a strong ref
2679                          * on this node. If so, do a strong
2680                          * increment on the node to ensure it
2681                          * stays alive until the transaction is
2682                          * done.
2683                          */
2684                         binder_proc_lock(proc);
2685                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2686                                                      true);
2687                         if (ref) {
2688                                 binder_inc_node(ref->node, 1, 0, NULL);
2689                                 target_node = ref->node;
2690                         }
2691                         binder_proc_unlock(proc);
2692                         if (target_node == NULL) {
2693                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2694                                         proc->pid, thread->pid);
2695                                 return_error = BR_FAILED_REPLY;
2696                                 return_error_param = -EINVAL;
2697                                 return_error_line = __LINE__;
2698                                 goto err_invalid_target_handle;
2699                         }
2700                 } else {
2701                         mutex_lock(&context->context_mgr_node_lock);
2702                         target_node = context->binder_context_mgr_node;
2703                         if (target_node == NULL) {
2704                                 return_error = BR_DEAD_REPLY;
2705                                 mutex_unlock(&context->context_mgr_node_lock);
2706                                 return_error_line = __LINE__;
2707                                 goto err_no_context_mgr_node;
2708                         }
2709                         binder_inc_node(target_node, 1, 0, NULL);
2710                         mutex_unlock(&context->context_mgr_node_lock);
2711                 }
2712                 e->to_node = target_node->debug_id;
2713                 binder_node_lock(target_node);
2714                 target_proc = target_node->proc;
2715                 if (target_proc == NULL) {
2716                         binder_node_unlock(target_node);
2717                         return_error = BR_DEAD_REPLY;
2718                         return_error_line = __LINE__;
2719                         goto err_dead_binder;
2720                 }
2721                 binder_inner_proc_lock(target_proc);
2722                 target_proc->tmp_ref++;
2723                 binder_inner_proc_unlock(target_proc);
2724                 binder_node_unlock(target_node);
2725                 if (security_binder_transaction(proc->tsk,
2726                                                 target_proc->tsk) < 0) {
2727                         return_error = BR_FAILED_REPLY;
2728                         return_error_param = -EPERM;
2729                         return_error_line = __LINE__;
2730                         goto err_invalid_target_handle;
2731                 }
2732                 binder_inner_proc_lock(proc);
2733                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2734                         struct binder_transaction *tmp;
2735
2736                         tmp = thread->transaction_stack;
2737                         if (tmp->to_thread != thread) {
2738                                 spin_lock(&tmp->lock);
2739                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2740                                         proc->pid, thread->pid, tmp->debug_id,
2741                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2742                                         tmp->to_thread ?
2743                                         tmp->to_thread->pid : 0);
2744                                 spin_unlock(&tmp->lock);
2745                                 binder_inner_proc_unlock(proc);
2746                                 return_error = BR_FAILED_REPLY;
2747                                 return_error_param = -EPROTO;
2748                                 return_error_line = __LINE__;
2749                                 goto err_bad_call_stack;
2750                         }
2751                         while (tmp) {
2752                                 struct binder_thread *from;
2753
2754                                 spin_lock(&tmp->lock);
2755                                 from = tmp->from;
2756                                 if (from && from->proc == target_proc) {
2757                                         atomic_inc(&from->tmp_ref);
2758                                         target_thread = from;
2759                                         spin_unlock(&tmp->lock);
2760                                         break;
2761                                 }
2762                                 spin_unlock(&tmp->lock);
2763                                 tmp = tmp->from_parent;
2764                         }
2765                 }
2766                 binder_inner_proc_unlock(proc);
2767         }
2768         if (target_thread)
2769                 e->to_thread = target_thread->pid;
2770         e->to_proc = target_proc->pid;
2771
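        /*
         * Two work items are allocated per call: the transaction itself,
         * delivered to the target, and a BINDER_WORK_TRANSACTION_COMPLETE
         * queued back to this thread so userspace learns the command was
         * accepted.
         */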
2772         /* TODO: reuse incoming transaction for reply */
2773         t = kzalloc(sizeof(*t), GFP_KERNEL);
2774         if (t == NULL) {
2775                 return_error = BR_FAILED_REPLY;
2776                 return_error_param = -ENOMEM;
2777                 return_error_line = __LINE__;
2778                 goto err_alloc_t_failed;
2779         }
2780         binder_stats_created(BINDER_STAT_TRANSACTION);
2781         spin_lock_init(&t->lock);
2782
2783         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2784         if (tcomplete == NULL) {
2785                 return_error = BR_FAILED_REPLY;
2786                 return_error_param = -ENOMEM;
2787                 return_error_line = __LINE__;
2788                 goto err_alloc_tcomplete_failed;
2789         }
2790         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2791
2792         t->debug_id = t_debug_id;
2793
2794         if (reply)
2795                 binder_debug(BINDER_DEBUG_TRANSACTION,
2796                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2797                              proc->pid, thread->pid, t->debug_id,
2798                              target_proc->pid, target_thread->pid,
2799                              (u64)tr->data.ptr.buffer,
2800                              (u64)tr->data.ptr.offsets,
2801                              (u64)tr->data_size, (u64)tr->offsets_size,
2802                              (u64)extra_buffers_size);
2803         else
2804                 binder_debug(BINDER_DEBUG_TRANSACTION,
2805                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2806                              proc->pid, thread->pid, t->debug_id,
2807                              target_proc->pid, target_node->debug_id,
2808                              (u64)tr->data.ptr.buffer,
2809                              (u64)tr->data.ptr.offsets,
2810                              (u64)tr->data_size, (u64)tr->offsets_size,
2811                              (u64)extra_buffers_size);
2812
2813         if (!reply && !(tr->flags & TF_ONE_WAY))
2814                 t->from = thread;
2815         else
2816                 t->from = NULL;
2817         t->sender_euid = task_euid(proc->tsk);
2818         t->to_proc = target_proc;
2819         t->to_thread = target_thread;
2820         t->code = tr->code;
2821         t->flags = tr->flags;
2822         t->priority = task_nice(current);
2823
2824         trace_binder_transaction(reply, t, target_node);
2825
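        /*
         * One allocation in the target's space covers the data area, the
         * pointer-aligned offsets array and any extra scatter-gather
         * buffers; one-way transactions are charged to the target's
         * async free space.
         */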
2826         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2827                 tr->offsets_size, extra_buffers_size,
2828                 !reply && (t->flags & TF_ONE_WAY));
2829         if (IS_ERR(t->buffer)) {
2830                 /*
2831                  * -ESRCH means the target's VMA is gone: the target process is dying.
2832                  */
2833                 return_error_param = PTR_ERR(t->buffer);
2834                 return_error = return_error_param == -ESRCH ?
2835                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2836                 return_error_line = __LINE__;
2837                 t->buffer = NULL;
2838                 goto err_binder_alloc_buf_failed;
2839         }
2840         t->buffer->allow_user_free = 0;
2841         t->buffer->debug_id = t->debug_id;
2842         t->buffer->transaction = t;
2843         t->buffer->target_node = target_node;
2844         trace_binder_transaction_alloc_buf(t->buffer);
2845         off_start = (binder_size_t *)(t->buffer->data +
2846                                       ALIGN(tr->data_size, sizeof(void *)));
2847         offp = off_start;
2848
2849         if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2850                            tr->data.ptr.buffer, tr->data_size)) {
2851                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2852                                 proc->pid, thread->pid);
2853                 return_error = BR_FAILED_REPLY;
2854                 return_error_param = -EFAULT;
2855                 return_error_line = __LINE__;
2856                 goto err_copy_data_failed;
2857         }
2858         if (copy_from_user(offp, (const void __user *)(uintptr_t)
2859                            tr->data.ptr.offsets, tr->offsets_size)) {
2860                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2861                                 proc->pid, thread->pid);
2862                 return_error = BR_FAILED_REPLY;
2863                 return_error_param = -EFAULT;
2864                 return_error_line = __LINE__;
2865                 goto err_copy_data_failed;
2866         }
2867         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2868                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2869                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2870                 return_error = BR_FAILED_REPLY;
2871                 return_error_param = -EINVAL;
2872                 return_error_line = __LINE__;
2873                 goto err_bad_offset;
2874         }
2875         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2876                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2877                                   proc->pid, thread->pid,
2878                                   (u64)extra_buffers_size);
2879                 return_error = BR_FAILED_REPLY;
2880                 return_error_param = -EINVAL;
2881                 return_error_line = __LINE__;
2882                 goto err_bad_offset;
2883         }
2884         off_end = (void *)off_start + tr->offsets_size;
2885         sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2886         sg_buf_end = sg_bufp + extra_buffers_size;
2887         off_min = 0;
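        /*
         * Validate and translate every flattened object: each offset must
         * reference a valid object inside the buffer, and off_min forces
         * offsets to strictly advance so objects cannot overlap or repeat.
         */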
2888         for (; offp < off_end; offp++) {
2889                 struct binder_object_header *hdr;
2890                 size_t object_size = binder_validate_object(t->buffer, *offp);
2891
2892                 if (object_size == 0 || *offp < off_min) {
2893                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2894                                           proc->pid, thread->pid, (u64)*offp,
2895                                           (u64)off_min,
2896                                           (u64)t->buffer->data_size);
2897                         return_error = BR_FAILED_REPLY;
2898                         return_error_param = -EINVAL;
2899                         return_error_line = __LINE__;
2900                         goto err_bad_offset;
2901                 }
2902
2903                 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2904                 off_min = *offp + object_size;
2905                 switch (hdr->type) {
2906                 case BINDER_TYPE_BINDER:
2907                 case BINDER_TYPE_WEAK_BINDER: {
2908                         struct flat_binder_object *fp;
2909
2910                         fp = to_flat_binder_object(hdr);
2911                         ret = binder_translate_binder(fp, t, thread);
2912                         if (ret < 0) {
2913                                 return_error = BR_FAILED_REPLY;
2914                                 return_error_param = ret;
2915                                 return_error_line = __LINE__;
2916                                 goto err_translate_failed;
2917                         }
2918                 } break;
2919                 case BINDER_TYPE_HANDLE:
2920                 case BINDER_TYPE_WEAK_HANDLE: {
2921                         struct flat_binder_object *fp;
2922
2923                         fp = to_flat_binder_object(hdr);
2924                         ret = binder_translate_handle(fp, t, thread);
2925                         if (ret < 0) {
2926                                 return_error = BR_FAILED_REPLY;
2927                                 return_error_param = ret;
2928                                 return_error_line = __LINE__;
2929                                 goto err_translate_failed;
2930                         }
2931                 } break;
2932
2933                 case BINDER_TYPE_FD: {
2934                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2935                         int target_fd = binder_translate_fd(fp->fd, t, thread,
2936                                                             in_reply_to);
2937
2938                         if (target_fd < 0) {
2939                                 return_error = BR_FAILED_REPLY;
2940                                 return_error_param = target_fd;
2941                                 return_error_line = __LINE__;
2942                                 goto err_translate_failed;
2943                         }
2944                         fp->pad_binder = 0;
2945                         fp->fd = target_fd;
2946                 } break;
2947                 case BINDER_TYPE_FDA: {
2948                         struct binder_fd_array_object *fda =
2949                                 to_binder_fd_array_object(hdr);
2950                         struct binder_buffer_object *parent =
2951                                 binder_validate_ptr(t->buffer, fda->parent,
2952                                                     off_start,
2953                                                     offp - off_start);
2954                         if (!parent) {
2955                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2956                                                   proc->pid, thread->pid);
2957                                 return_error = BR_FAILED_REPLY;
2958                                 return_error_param = -EINVAL;
2959                                 return_error_line = __LINE__;
2960                                 goto err_bad_parent;
2961                         }
2962                         if (!binder_validate_fixup(t->buffer, off_start,
2963                                                    parent, fda->parent_offset,
2964                                                    last_fixup_obj,
2965                                                    last_fixup_min_off)) {
2966                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2967                                                   proc->pid, thread->pid);
2968                                 return_error = BR_FAILED_REPLY;
2969                                 return_error_param = -EINVAL;
2970                                 return_error_line = __LINE__;
2971                                 goto err_bad_parent;
2972                         }
2973                         ret = binder_translate_fd_array(fda, parent, t, thread,
2974                                                         in_reply_to);
2975                         if (ret < 0) {
2976                                 return_error = BR_FAILED_REPLY;
2977                                 return_error_param = ret;
2978                                 return_error_line = __LINE__;
2979                                 goto err_translate_failed;
2980                         }
2981                         last_fixup_obj = parent;
2982                         last_fixup_min_off =
2983                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
2984                 } break;
2985                 case BINDER_TYPE_PTR: {
2986                         struct binder_buffer_object *bp =
2987                                 to_binder_buffer_object(hdr);
2988                         size_t buf_left = sg_buf_end - sg_bufp;
2989
2990                         if (bp->length > buf_left) {
2991                                 binder_user_error("%d:%d got transaction with too large buffer\n",
2992                                                   proc->pid, thread->pid);
2993                                 return_error = BR_FAILED_REPLY;
2994                                 return_error_param = -EINVAL;
2995                                 return_error_line = __LINE__;
2996                                 goto err_bad_offset;
2997                         }
2998                         if (copy_from_user(sg_bufp,
2999                                            (const void __user *)(uintptr_t)
3000                                            bp->buffer, bp->length)) {
3001                                 binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3002                                                   proc->pid, thread->pid);
3003                                 return_error_param = -EFAULT;
3004                                 return_error = BR_FAILED_REPLY;
3005                                 return_error_line = __LINE__;
3006                                 goto err_copy_data_failed;
3007                         }
3008                         /* Fix up the buffer pointer to point into the target process's address space */
3009                         bp->buffer = (uintptr_t)sg_bufp +
3010                                 binder_alloc_get_user_buffer_offset(
3011                                                 &target_proc->alloc);
3012                         sg_bufp += ALIGN(bp->length, sizeof(u64));
3013
3014                         ret = binder_fixup_parent(t, thread, bp, off_start,
3015                                                   offp - off_start,
3016                                                   last_fixup_obj,
3017                                                   last_fixup_min_off);
3018                         if (ret < 0) {
3019                                 return_error = BR_FAILED_REPLY;
3020                                 return_error_param = ret;
3021                                 return_error_line = __LINE__;
3022                                 goto err_translate_failed;
3023                         }
3024                         last_fixup_obj = bp;
3025                         last_fixup_min_off = 0;
3026                 } break;
3027                 default:
3028                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3029                                 proc->pid, thread->pid, hdr->type);
3030                         return_error = BR_FAILED_REPLY;
3031                         return_error_param = -EINVAL;
3032                         return_error_line = __LINE__;
3033                         goto err_bad_object_type;
3034                 }
3035         }
3036         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3037         binder_enqueue_work(proc, tcomplete, &thread->todo);
3038         t->work.type = BINDER_WORK_TRANSACTION;
3039
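        /*
         * Three delivery cases: a reply goes straight to the thread that
         * sent the original transaction; a synchronous call is first
         * pushed onto this thread's stack so the reply can find its way
         * back; a one-way call is queued on the target with no reply
         * bookkeeping.
         */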
3040         if (reply) {
3041                 binder_inner_proc_lock(target_proc);
3042                 if (target_thread->is_dead) {
3043                         binder_inner_proc_unlock(target_proc);
3044                         goto err_dead_proc_or_thread;
3045                 }
3046                 BUG_ON(t->buffer->async_transaction != 0);
3047                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3048                 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3049                 binder_inner_proc_unlock(target_proc);
3050                 wake_up_interruptible_sync(&target_thread->wait);
3051                 binder_free_transaction(in_reply_to);
3052         } else if (!(t->flags & TF_ONE_WAY)) {
3053                 BUG_ON(t->buffer->async_transaction != 0);
3054                 binder_inner_proc_lock(proc);
3055                 t->need_reply = 1;
3056                 t->from_parent = thread->transaction_stack;
3057                 thread->transaction_stack = t;
3058                 binder_inner_proc_unlock(proc);
3059                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3060                         binder_inner_proc_lock(proc);
3061                         binder_pop_transaction_ilocked(thread, t);
3062                         binder_inner_proc_unlock(proc);
3063                         goto err_dead_proc_or_thread;
3064                 }
3065         } else {
3066                 BUG_ON(target_node == NULL);
3067                 BUG_ON(t->buffer->async_transaction != 1);
3068                 if (!binder_proc_transaction(t, target_proc, NULL))
3069                         goto err_dead_proc_or_thread;
3070         }
3071         if (target_thread)
3072                 binder_thread_dec_tmpref(target_thread);
3073         binder_proc_dec_tmpref(target_proc);
3074         /*
3075          * write barrier: publish the log entry fields written above
3076          * before debug_id_done marks the entry complete
3077          */
3078         smp_wmb();
3079         WRITE_ONCE(e->debug_id_done, t_debug_id);
3080         return;
3081
3082 err_dead_proc_or_thread:
3083         return_error = BR_DEAD_REPLY;
3084         return_error_line = __LINE__;
3085         binder_dequeue_work(proc, tcomplete);
3086 err_translate_failed:
3087 err_bad_object_type:
3088 err_bad_offset:
3089 err_bad_parent:
3090 err_copy_data_failed:
3091         trace_binder_transaction_failed_buffer_release(t->buffer);
3092         binder_transaction_buffer_release(target_proc, t->buffer, offp);
3093         target_node = NULL;
3094         t->buffer->transaction = NULL;
3095         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3096 err_binder_alloc_buf_failed:
3097         kfree(tcomplete);
3098         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3099 err_alloc_tcomplete_failed:
3100         kfree(t);
3101         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3102 err_alloc_t_failed:
3103 err_bad_call_stack:
3104 err_empty_call_stack:
3105 err_dead_binder:
3106 err_invalid_target_handle:
3107 err_no_context_mgr_node:
3108         if (target_thread)
3109                 binder_thread_dec_tmpref(target_thread);
3110         if (target_proc)
3111                 binder_proc_dec_tmpref(target_proc);
3112         if (target_node)
3113                 binder_dec_node(target_node, 1, 0);
3114
3115         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3116                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3117                      proc->pid, thread->pid, return_error, return_error_param,
3118                      (u64)tr->data_size, (u64)tr->offsets_size,
3119                      return_error_line);
3120
3121         {
3122                 struct binder_transaction_log_entry *fe;
3123
3124                 e->return_error = return_error;
3125                 e->return_error_param = return_error_param;
3126                 e->return_error_line = return_error_line;
3127                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3128                 *fe = *e;
3129                 /*
3130                  * write barrier: publish the failure-log entry fields
3131                  * before debug_id_done marks both entries complete
3132                  */
3133                 smp_wmb();
3134                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3135                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3136         }
3137
3138         BUG_ON(thread->return_error.cmd != BR_OK);
3139         if (in_reply_to) {
3140                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3141                 binder_enqueue_work(thread->proc,
3142                                     &thread->return_error.work,
3143                                     &thread->todo);
3144                 binder_send_failed_reply(in_reply_to, return_error);
3145         } else {
3146                 thread->return_error.cmd = return_error;
3147                 binder_enqueue_work(thread->proc,
3148                                     &thread->return_error.work,
3149                                     &thread->todo);
3150         }
3151 }
3152
3153 static int binder_thread_write(struct binder_proc *proc,
3154                         struct binder_thread *thread,
3155                         binder_uintptr_t binder_buffer, size_t size,
3156                         binder_size_t *consumed)
3157 {
3158         uint32_t cmd;
3159         struct binder_context *context = proc->context;
3160         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3161         void __user *ptr = buffer + *consumed;
3162         void __user *end = buffer + size;
3163
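        /*
         * The write buffer is a packed stream of BC_* command words, each
         * followed by its command-specific payload; *consumed is updated
         * after every command so a partly processed buffer can resume. A
         * minimal userspace sketch of issuing one command (illustrative
         * names only; assumes the standard <linux/android/binder.h> UAPI
         * and an open binder_fd):
         *
         *      uint32_t cmd = BC_ENTER_LOOPER;
         *      struct binder_write_read bwr = {
         *              .write_buffer = (binder_uintptr_t)&cmd,
         *              .write_size   = sizeof(cmd),
         *      };
         *      ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
         */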
3164         while (ptr < end && thread->return_error.cmd == BR_OK) {
3165                 int ret;
3166
3167                 if (get_user(cmd, (uint32_t __user *)ptr))
3168                         return -EFAULT;
3169                 ptr += sizeof(uint32_t);
3170                 trace_binder_command(cmd);
3171                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3172                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3173                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3174                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3175                 }
3176                 switch (cmd) {
3177                 case BC_INCREFS:
3178                 case BC_ACQUIRE:
3179                 case BC_RELEASE:
3180                 case BC_DECREFS: {
3181                         uint32_t target;
3182                         const char *debug_string;
3183                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3184                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3185                         struct binder_ref_data rdata;
3186
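                        /*
                         * Handle 0 with an increment refers to the context
                         * manager: take the ref directly on its node. Any
                         * other handle (or a decrement) goes through the
                         * ref table via binder_update_ref_for_handle().
                         */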
3187                         if (get_user(target, (uint32_t __user *)ptr))
3188                                 return -EFAULT;
3189
3190                         ptr += sizeof(uint32_t);
3191                         ret = -1;
3192                         if (increment && !target) {
3193                                 struct binder_node *ctx_mgr_node;
3194                                 mutex_lock(&context->context_mgr_node_lock);
3195                                 ctx_mgr_node = context->binder_context_mgr_node;
3196                                 if (ctx_mgr_node)
3197                                         ret = binder_inc_ref_for_node(
3198                                                         proc, ctx_mgr_node,
3199                                                         strong, NULL, &rdata);
3200                                 mutex_unlock(&context->context_mgr_node_lock);
3201                         }
3202                         if (ret)
3203                                 ret = binder_update_ref_for_handle(
3204                                                 proc, target, increment, strong,
3205                                                 &rdata);
3206                         if (!ret && rdata.desc != target) {
3207                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3208                                         proc->pid, thread->pid,
3209                                         target, rdata.desc);
3210                         }
3211                         switch (cmd) {
3212                         case BC_INCREFS:
3213                                 debug_string = "IncRefs";
3214                                 break;
3215                         case BC_ACQUIRE:
3216                                 debug_string = "Acquire";
3217                                 break;
3218                         case BC_RELEASE:
3219                                 debug_string = "Release";
3220                                 break;
3221                         case BC_DECREFS:
3222                         default:
3223                                 debug_string = "DecRefs";
3224                                 break;
3225                         }
3226                         if (ret) {
3227                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3228                                         proc->pid, thread->pid, debug_string,
3229                                         strong, target, ret);
3230                                 break;
3231                         }
3232                         binder_debug(BINDER_DEBUG_USER_REFS,
3233                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3234                                      proc->pid, thread->pid, debug_string,
3235                                      rdata.debug_id, rdata.desc, rdata.strong,
3236                                      rdata.weak);
3237                         break;
3238                 }
3239                 case BC_INCREFS_DONE:
3240                 case BC_ACQUIRE_DONE: {
3241                         binder_uintptr_t node_ptr;
3242                         binder_uintptr_t cookie;
3243                         struct binder_node *node;
3244                         bool free_node;
3245
3246                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3247                                 return -EFAULT;
3248                         ptr += sizeof(binder_uintptr_t);
3249                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3250                                 return -EFAULT;
3251                         ptr += sizeof(binder_uintptr_t);
3252                         node = binder_get_node(proc, node_ptr);
3253                         if (node == NULL) {
3254                                 binder_user_error("%d:%d %s u%016llx no match\n",
3255                                         proc->pid, thread->pid,
3256                                         cmd == BC_INCREFS_DONE ?
3257                                         "BC_INCREFS_DONE" :
3258                                         "BC_ACQUIRE_DONE",
3259                                         (u64)node_ptr);
3260                                 break;
3261                         }
3262                         if (cookie != node->cookie) {
3263                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3264                                         proc->pid, thread->pid,
3265                                         cmd == BC_INCREFS_DONE ?
3266                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3267                                         (u64)node_ptr, node->debug_id,
3268                                         (u64)cookie, (u64)node->cookie);
3269                                 binder_put_node(node);
3270                                 break;
3271                         }
3272                         binder_node_inner_lock(node);
3273                         if (cmd == BC_ACQUIRE_DONE) {
3274                                 if (node->pending_strong_ref == 0) {
3275                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3276                                                 proc->pid, thread->pid,
3277                                                 node->debug_id);
3278                                         binder_node_inner_unlock(node);
3279                                         binder_put_node(node);
3280                                         break;
3281                                 }
3282                                 node->pending_strong_ref = 0;
3283                         } else {
3284                                 if (node->pending_weak_ref == 0) {
3285                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3286                                                 proc->pid, thread->pid,
3287                                                 node->debug_id);
3288                                         binder_node_inner_unlock(node);
3289                                         binder_put_node(node);
3290                                         break;
3291                                 }
3292                                 node->pending_weak_ref = 0;
3293                         }
3294                         free_node = binder_dec_node_nilocked(node,
3295                                         cmd == BC_ACQUIRE_DONE, 0);
3296                         WARN_ON(free_node);
3297                         binder_debug(BINDER_DEBUG_USER_REFS,
3298                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3299                                      proc->pid, thread->pid,
3300                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3301                                      node->debug_id, node->local_strong_refs,
3302                                      node->local_weak_refs, node->tmp_refs);
3303                         binder_node_inner_unlock(node);
3304                         binder_put_node(node);
3305                         break;
3306                 }
3307                 case BC_ATTEMPT_ACQUIRE:
3308                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3309                         return -EINVAL;
3310                 case BC_ACQUIRE_RESULT:
3311                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3312                         return -EINVAL;
3313
3314                 case BC_FREE_BUFFER: {
3315                         binder_uintptr_t data_ptr;
3316                         struct binder_buffer *buffer;
3317
3318                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3319                                 return -EFAULT;
3320                         ptr += sizeof(binder_uintptr_t);
3321
3322                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3323                                                               data_ptr);
3324                         if (buffer == NULL) {
3325                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3326                                         proc->pid, thread->pid, (u64)data_ptr);
3327                                 break;
3328                         }
3329                         if (!buffer->allow_user_free) {
3330                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3331                                         proc->pid, thread->pid, (u64)data_ptr);
3332                                 break;
3333                         }
3334                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3335                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3336                                      proc->pid, thread->pid, (u64)data_ptr,
3337                                      buffer->debug_id,
3338                                      buffer->transaction ? "active" : "finished");
3339
3340                         if (buffer->transaction) {
3341                                 buffer->transaction->buffer = NULL;
3342                                 buffer->transaction = NULL;
3343                         }
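                        /*
                         * Freeing an async buffer finishes that async
                         * transaction: move the next queued async work for
                         * this node (if any) to proc->todo, so at most one
                         * async transaction per node is in flight.
                         */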
3344                         if (buffer->async_transaction && buffer->target_node) {
3345                                 struct binder_node *buf_node;
3346                                 struct binder_work *w;
3347
3348                                 buf_node = buffer->target_node;
3349                                 binder_node_inner_lock(buf_node);
3350                                 BUG_ON(!buf_node->has_async_transaction);
3351                                 BUG_ON(buf_node->proc != proc);
3352                                 w = binder_dequeue_work_head_ilocked(
3353                                                 &buf_node->async_todo);
3354                                 if (!w) {
3355                                         buf_node->has_async_transaction = 0;
3356                                 } else {
3357                                         binder_enqueue_work_ilocked(
3358                                                         w, &proc->todo);
3359                                         binder_wakeup_proc_ilocked(proc);
3360                                 }
3361                                 binder_node_inner_unlock(buf_node);
3362                         }
3363                         trace_binder_transaction_buffer_release(buffer);
3364                         binder_transaction_buffer_release(proc, buffer, NULL);
3365                         binder_alloc_free_buf(&proc->alloc, buffer);
3366                         break;
3367                 }
3368
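                /*
                 * The _SG variants append a buffers_size field so the
                 * payload may carry BINDER_TYPE_PTR scatter-gather
                 * buffers; plain BC_TRANSACTION/BC_REPLY pass 0 for the
                 * extra size.
                 */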
3369                 case BC_TRANSACTION_SG:
3370                 case BC_REPLY_SG: {
3371                         struct binder_transaction_data_sg tr;
3372
3373                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3374                                 return -EFAULT;
3375                         ptr += sizeof(tr);
3376                         binder_transaction(proc, thread, &tr.transaction_data,
3377                                            cmd == BC_REPLY_SG, tr.buffers_size);
3378                         break;
3379                 }
3380                 case BC_TRANSACTION:
3381                 case BC_REPLY: {
3382                         struct binder_transaction_data tr;
3383
3384                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3385                                 return -EFAULT;
3386                         ptr += sizeof(tr);
3387                         binder_transaction(proc, thread, &tr,
3388                                            cmd == BC_REPLY, 0);
3389                         break;
3390                 }
3391
3392                 case BC_REGISTER_LOOPER:
3393                         binder_debug(BINDER_DEBUG_THREADS,
3394                                      "%d:%d BC_REGISTER_LOOPER\n",
3395                                      proc->pid, thread->pid);
3396                         binder_inner_proc_lock(proc);
3397                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3398                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3399                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3400                                         proc->pid, thread->pid);
3401                         } else if (proc->requested_threads == 0) {
3402                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3403                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3404                                         proc->pid, thread->pid);
3405                         } else {
3406                                 proc->requested_threads--;
3407                                 proc->requested_threads_started++;
3408                         }
3409                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3410                         binder_inner_proc_unlock(proc);
3411                         break;
3412                 case BC_ENTER_LOOPER:
3413                         binder_debug(BINDER_DEBUG_THREADS,
3414                                      "%d:%d BC_ENTER_LOOPER\n",
3415                                      proc->pid, thread->pid);
3416                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3417                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3418                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3419                                         proc->pid, thread->pid);
3420                         }
3421                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3422                         break;
3423                 case BC_EXIT_LOOPER:
3424                         binder_debug(BINDER_DEBUG_THREADS,
3425                                      "%d:%d BC_EXIT_LOOPER\n",
3426                                      proc->pid, thread->pid);
3427                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3428                         break;
3429
3430                 case BC_REQUEST_DEATH_NOTIFICATION:
3431                 case BC_CLEAR_DEATH_NOTIFICATION: {
3432                         uint32_t target;
3433                         binder_uintptr_t cookie;
3434                         struct binder_ref *ref;
3435                         struct binder_ref_death *death = NULL;
3436
3437                         if (get_user(target, (uint32_t __user *)ptr))
3438                                 return -EFAULT;
3439                         ptr += sizeof(uint32_t);
3440                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3441                                 return -EFAULT;
3442                         ptr += sizeof(binder_uintptr_t);
3443                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3444                                 /*
3445                                  * Allocate first: GFP_KERNEL may sleep, which
3446                                  * is not allowed under the proc lock below.
3447                                  */
3448                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3449                                 if (death == NULL) {
3450                                         WARN_ON(thread->return_error.cmd !=
3451                                                 BR_OK);
3452                                         thread->return_error.cmd = BR_ERROR;
3453                                         binder_enqueue_work(
3454                                                 thread->proc,
3455                                                 &thread->return_error.work,
3456                                                 &thread->todo);
3457                                         binder_debug(
3458                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3459                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3460                                                 proc->pid, thread->pid);
3461                                         break;
3462                                 }
3463                         }
3464                         binder_proc_lock(proc);
3465                         ref = binder_get_ref_olocked(proc, target, false);
3466                         if (ref == NULL) {
3467                                 binder_user_error("%d:%d %s invalid ref %d\n",
3468                                         proc->pid, thread->pid,
3469                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3470                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3471                                         "BC_CLEAR_DEATH_NOTIFICATION",
3472                                         target);
3473                                 binder_proc_unlock(proc);
3474                                 kfree(death);
3475                                 break;
3476                         }
3477
3478                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3479                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3480                                      proc->pid, thread->pid,
3481                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3482                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3483                                      "BC_CLEAR_DEATH_NOTIFICATION",
3484                                      (u64)cookie, ref->data.debug_id,
3485                                      ref->data.desc, ref->data.strong,
3486                                      ref->data.weak, ref->node->debug_id);
3487
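                        /*
                         * Request: attach the preallocated death record to
                         * the ref; if the node's process is already dead,
                         * queue the DEAD_BINDER work right away. Clear: the
                         * cookie must match, and a notification already in
                         * flight is retagged DEAD_BINDER_AND_CLEAR rather
                         * than dequeued.
                         */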
3488                         binder_node_lock(ref->node);
3489                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3490                                 if (ref->death) {
3491                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3492                                                 proc->pid, thread->pid);
3493                                         binder_node_unlock(ref->node);
3494                                         binder_proc_unlock(proc);
3495                                         kfree(death);
3496                                         break;
3497                                 }
3498                                 binder_stats_created(BINDER_STAT_DEATH);
3499                                 INIT_LIST_HEAD(&death->work.entry);
3500                                 death->cookie = cookie;
3501                                 ref->death = death;
3502                                 if (ref->node->proc == NULL) {
3503                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3504
3505                                         binder_inner_proc_lock(proc);
3506                                         binder_enqueue_work_ilocked(
3507                                                 &ref->death->work, &proc->todo);
3508                                         binder_wakeup_proc_ilocked(proc);
3509                                         binder_inner_proc_unlock(proc);
3510                                 }
3511                         } else {
3512                                 if (ref->death == NULL) {
3513                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3514                                                 proc->pid, thread->pid);
3515                                         binder_node_unlock(ref->node);
3516                                         binder_proc_unlock(proc);
3517                                         break;
3518                                 }
3519                                 death = ref->death;
3520                                 if (death->cookie != cookie) {
3521                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3522                                                 proc->pid, thread->pid,
3523                                                 (u64)death->cookie,
3524                                                 (u64)cookie);
3525                                         binder_node_unlock(ref->node);
3526                                         binder_proc_unlock(proc);
3527                                         break;
3528                                 }
3529                                 ref->death = NULL;
3530                                 binder_inner_proc_lock(proc);
3531                                 if (list_empty(&death->work.entry)) {
3532                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3533                                         if (thread->looper &
3534                                             (BINDER_LOOPER_STATE_REGISTERED |
3535                                              BINDER_LOOPER_STATE_ENTERED))
3536                                                 binder_enqueue_work_ilocked(
3537                                                                 &death->work,
3538                                                                 &thread->todo);
3539                                         else {
3540                                                 binder_enqueue_work_ilocked(
3541                                                                 &death->work,
3542                                                                 &proc->todo);
3543                                                 binder_wakeup_proc_ilocked(
3544                                                                 proc);
3545                                         }
3546                                 } else {
3547                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3548                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3549                                 }
3550                                 binder_inner_proc_unlock(proc);
3551                         }
3552                         binder_node_unlock(ref->node);
3553                         binder_proc_unlock(proc);
3554                 } break;
3555                 case BC_DEAD_BINDER_DONE: {
3556                         struct binder_work *w;
3557                         binder_uintptr_t cookie;
3558                         struct binder_ref_death *death = NULL;
3559
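                        /*
                         * Userspace acknowledges a delivered death
                         * notification by echoing the cookie; locate the
                         * matching record on proc->delivered_death so a
                         * pending clear can now complete.
                         */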
3560                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3561                                 return -EFAULT;
3562
3563                         ptr += sizeof(cookie);
3564                         binder_inner_proc_lock(proc);
3565                         list_for_each_entry(w, &proc->delivered_death,
3566                                             entry) {
3567                                 struct binder_ref_death *tmp_death =
3568                                         container_of(w,
3569                                                      struct binder_ref_death,
3570                                                      work);
3571
3572                                 if (tmp_death->cookie == cookie) {
3573                                         death = tmp_death;
3574                                         break;
3575                                 }
3576                         }
3577                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3578                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3579                                      proc->pid, thread->pid, (u64)cookie,
3580                                      death);
3581                         if (death == NULL) {
3582                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3583                                         proc->pid, thread->pid, (u64)cookie);
3584                                 binder_inner_proc_unlock(proc);
3585                                 break;
3586                         }
3587                         binder_dequeue_work_ilocked(&death->work);
3588                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3589                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3590                                 if (thread->looper &
3591                                         (BINDER_LOOPER_STATE_REGISTERED |
3592                                          BINDER_LOOPER_STATE_ENTERED))
3593                                         binder_enqueue_work_ilocked(
3594                                                 &death->work, &thread->todo);
3595                                 else {
3596                                         binder_enqueue_work_ilocked(
3597                                                         &death->work,
3598                                                         &proc->todo);
3599                                         binder_wakeup_proc_ilocked(proc);
3600                                 }
3601                         }
3602                         binder_inner_proc_unlock(proc);
3603                 } break;
3604
3605                 default:
3606                         pr_err("%d:%d unknown command %d\n",
3607                                proc->pid, thread->pid, cmd);
3608                         return -EINVAL;
3609                 }
3610                 *consumed = ptr - buffer;
3611         }
3612         return 0;
3613 }
3614
3615 static void binder_stat_br(struct binder_proc *proc,
3616                            struct binder_thread *thread, uint32_t cmd)
3617 {
3618         trace_binder_return(cmd);
3619         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3620                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3621                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3622                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3623         }
3624 }
3625
3626 static int binder_has_thread_work(struct binder_thread *thread)
3627 {
3628         return !binder_worklist_empty(thread->proc, &thread->todo) ||
3629                 thread->looper_need_return;
3630 }
3631
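/*
 * Write one node command (BR_INCREFS, BR_ACQUIRE, BR_RELEASE or
 * BR_DECREFS) plus the node's pointer and cookie to the read buffer,
 * advancing *ptrp past the three words on success.
 */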
3632 static int binder_put_node_cmd(struct binder_proc *proc,
3633                                struct binder_thread *thread,
3634                                void __user **ptrp,
3635                                binder_uintptr_t node_ptr,
3636                                binder_uintptr_t node_cookie,
3637                                int node_debug_id,
3638                                uint32_t cmd, const char *cmd_name)
3639 {
3640         void __user *ptr = *ptrp;
3641
3642         if (put_user(cmd, (uint32_t __user *)ptr))
3643                 return -EFAULT;
3644         ptr += sizeof(uint32_t);
3645
3646         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3647                 return -EFAULT;
3648         ptr += sizeof(binder_uintptr_t);
3649
3650         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3651                 return -EFAULT;
3652         ptr += sizeof(binder_uintptr_t);
3653
3654         binder_stat_br(proc, thread, cmd);
3655         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3656                      proc->pid, thread->pid, cmd_name, node_debug_id,
3657                      (u64)node_ptr, (u64)node_cookie);
3658
3659         *ptrp = ptr;
3660         return 0;
3661 }
3662
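/*
 * Sleep until work arrives for this thread (or, if do_proc_work, for the
 * process, in which case the thread parks on proc->waiting_threads).
 * Returns 0 when there is work, -ERESTARTSYS on a signal. The
 * freezer_do_not_count()/freezer_count() pair keeps a parked binder
 * thread from blocking system suspend.
 */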
3663 static int binder_wait_for_work(struct binder_thread *thread,
3664                                 bool do_proc_work)
3665 {
3666         DEFINE_WAIT(wait);
3667         struct binder_proc *proc = thread->proc;
3668         int ret = 0;
3669
3670         freezer_do_not_count();
3671         binder_inner_proc_lock(proc);
3672         for (;;) {
3673                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3674                 if (binder_has_work_ilocked(thread, do_proc_work))
3675                         break;
3676                 if (do_proc_work)
3677                         list_add(&thread->waiting_thread_node,
3678                                  &proc->waiting_threads);
3679                 binder_inner_proc_unlock(proc);
3680                 schedule();
3681                 binder_inner_proc_lock(proc);
3682                 list_del_init(&thread->waiting_thread_node);
3683                 if (signal_pending(current)) {
3684                         ret = -ERESTARTSYS;
3685                         break;
3686                 }
3687         }
3688         finish_wait(&thread->wait, &wait);
3689         binder_inner_proc_unlock(proc);
3690         freezer_count();
3691
3692         return ret;
3693 }
3694
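/*
 * Fill the userspace read buffer with BR_* return commands and their
 * payloads, blocking via binder_wait_for_work() unless non_block is set;
 * *consumed is advanced past every byte written. A minimal userspace
 * consumer sketch (illustrative names only; assumes the standard
 * <linux/android/binder.h> UAPI and an open binder_fd):
 *
 *      struct binder_write_read bwr = {
 *              .read_buffer = (binder_uintptr_t)readbuf,
 *              .read_size   = sizeof(readbuf),
 *      };
 *      if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *              parse bwr.read_consumed bytes of BR_* commands in readbuf
 *
 * (The parse step above is pseudocode.)
 */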
3695 static int binder_thread_read(struct binder_proc *proc,
3696                               struct binder_thread *thread,
3697                               binder_uintptr_t binder_buffer, size_t size,
3698                               binder_size_t *consumed, int non_block)
3699 {
3700         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3701         void __user *ptr = buffer + *consumed;
3702         void __user *end = buffer + size;
3703
3704         int ret = 0;
3705         int wait_for_proc_work;
3706
3707         if (*consumed == 0) {
3708                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3709                         return -EFAULT;
3710                 ptr += sizeof(uint32_t);
3711         }
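        /*
         * A fresh read always begins with BR_NOOP, so the buffer is never
         * empty; the "no data added" test in the loop below checks for
         * exactly these 4 bytes.
         */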
3712
3713 retry:
3714         binder_inner_proc_lock(proc);
3715         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3716         binder_inner_proc_unlock(proc);
3717
3718         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3719
3720         trace_binder_wait_for_work(wait_for_proc_work,
3721                                    !!thread->transaction_stack,
3722                                    !binder_worklist_empty(proc, &thread->todo));
3723         if (wait_for_proc_work) {
3724                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3725                                         BINDER_LOOPER_STATE_ENTERED))) {
3726                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3727                                 proc->pid, thread->pid, thread->looper);
3728                         wait_event_interruptible(binder_user_error_wait,
3729                                                  binder_stop_on_user_error < 2);
3730                 }
3731                 binder_set_nice(proc->default_priority);
3732         }
3733
3734         if (non_block) {
3735                 if (!binder_has_work(thread, wait_for_proc_work))
3736                         ret = -EAGAIN;
3737         } else {
3738                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3739         }
3740
3741         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3742
3743         if (ret)
3744                 return ret;
3745
3746         while (1) {
3747                 uint32_t cmd;
3748                 struct binder_transaction_data tr;
3749                 struct binder_work *w = NULL;
3750                 struct list_head *list = NULL;
3751                 struct binder_transaction *t = NULL;
3752                 struct binder_thread *t_from;
3753
3754                 binder_inner_proc_lock(proc);
3755                 if (!binder_worklist_empty_ilocked(&thread->todo))
3756                         list = &thread->todo;
3757                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3758                            wait_for_proc_work)
3759                         list = &proc->todo;
3760                 else {
3761                         binder_inner_proc_unlock(proc);
3762
3763                         /* no data added */
3764                         if (ptr - buffer == sizeof(uint32_t) && !thread->looper_need_return)
3765                                 goto retry;
3766                         break;
3767                 }
3768
3769                 if (end - ptr < sizeof(tr) + 4) {
3770                         binder_inner_proc_unlock(proc);
3771                         break;
3772                 }
3773                 w = binder_dequeue_work_head_ilocked(list);
3774
3775                 switch (w->type) {
3776                 case BINDER_WORK_TRANSACTION: {
3777                         binder_inner_proc_unlock(proc);
3778                         t = container_of(w, struct binder_transaction, work);
3779                 } break;
3780                 case BINDER_WORK_RETURN_ERROR: {
3781                         struct binder_error *e = container_of(
3782                                         w, struct binder_error, work);
3783
3784                         WARN_ON(e->cmd == BR_OK);
3785                         binder_inner_proc_unlock(proc);
3786                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3787                                 return -EFAULT;
                        cmd = e->cmd;
3788                         e->cmd = BR_OK;
3789                         ptr += sizeof(uint32_t);
3790
                        /* record the command actually delivered, not BR_OK */
3791                         binder_stat_br(proc, thread, cmd);
3792                 } break;
3793                 case BINDER_WORK_TRANSACTION_COMPLETE: {
3794                         binder_inner_proc_unlock(proc);
3795                         cmd = BR_TRANSACTION_COMPLETE;
3796                         if (put_user(cmd, (uint32_t __user *)ptr))
3797                                 return -EFAULT;
3798                         ptr += sizeof(uint32_t);
3799
3800                         binder_stat_br(proc, thread, cmd);
3801                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3802                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3803                                      proc->pid, thread->pid);
3804                         kfree(w);
3805                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3806                 } break;
3807                 case BINDER_WORK_NODE: {
3808                         struct binder_node *node = container_of(w, struct binder_node, work);
3809                         int strong, weak;
3810                         binder_uintptr_t node_ptr = node->ptr;
3811                         binder_uintptr_t node_cookie = node->cookie;
3812                         int node_debug_id = node->debug_id;
3813                         int has_weak_ref;
3814                         int has_strong_ref;
3815                         void __user *orig_ptr = ptr;
3816
3817                         BUG_ON(proc != node->proc);
3818                         strong = node->internal_strong_refs ||
3819                                         node->local_strong_refs;
3820                         weak = !hlist_empty(&node->refs) ||
3821                                         node->local_weak_refs ||
3822                                         node->tmp_refs || strong;
3823                         has_strong_ref = node->has_strong_ref;
3824                         has_weak_ref = node->has_weak_ref;
3825
3826                         if (weak && !has_weak_ref) {
3827                                 node->has_weak_ref = 1;
3828                                 node->pending_weak_ref = 1;
3829                                 node->local_weak_refs++;
3830                         }
3831                         if (strong && !has_strong_ref) {
3832                                 node->has_strong_ref = 1;
3833                                 node->pending_strong_ref = 1;
3834                                 node->local_strong_refs++;
3835                         }
3836                         if (!strong && has_strong_ref)
3837                                 node->has_strong_ref = 0;
3838                         if (!weak && has_weak_ref)
3839                                 node->has_weak_ref = 0;
3840                         if (!weak && !strong) {
3841                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3842                                              "%d:%d node %d u%016llx c%016llx deleted\n",
3843                                              proc->pid, thread->pid,
3844                                              node_debug_id,
3845                                              (u64)node_ptr,
3846                                              (u64)node_cookie);
3847                                 rb_erase(&node->rb_node, &proc->nodes);
3848                                 binder_inner_proc_unlock(proc);
3849                                 binder_node_lock(node);
3850                                 /*
3851                                  * Acquire the node lock before freeing the
3852                                  * node to serialize with other threads that
3853                                  * may have been holding the node lock while
3854                                  * decrementing this node (avoids race where
3855                                  * this thread frees while the other thread
3856                                  * is unlocking the node after the final
3857                                  * decrement)
3858                                  */
3859                                 binder_node_unlock(node);
3860                                 binder_free_node(node);
3861                         } else
3862                                 binder_inner_proc_unlock(proc);
3863
3864                         if (weak && !has_weak_ref)
3865                                 ret = binder_put_node_cmd(
3866                                                 proc, thread, &ptr, node_ptr,
3867                                                 node_cookie, node_debug_id,
3868                                                 BR_INCREFS, "BR_INCREFS");
3869                         if (!ret && strong && !has_strong_ref)
3870                                 ret = binder_put_node_cmd(
3871                                                 proc, thread, &ptr, node_ptr,
3872                                                 node_cookie, node_debug_id,
3873                                                 BR_ACQUIRE, "BR_ACQUIRE");
3874                         if (!ret && !strong && has_strong_ref)
3875                                 ret = binder_put_node_cmd(
3876                                                 proc, thread, &ptr, node_ptr,
3877                                                 node_cookie, node_debug_id,
3878                                                 BR_RELEASE, "BR_RELEASE");
3879                         if (!ret && !weak && has_weak_ref)
3880                                 ret = binder_put_node_cmd(
3881                                                 proc, thread, &ptr, node_ptr,
3882                                                 node_cookie, node_debug_id,
3883                                                 BR_DECREFS, "BR_DECREFS");
3884                         if (orig_ptr == ptr)
3885                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3886                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
3887                                              proc->pid, thread->pid,
3888                                              node_debug_id,
3889                                              (u64)node_ptr,
3890                                              (u64)node_cookie);
3891                         if (ret)
3892                                 return ret;
3893                 } break;
3894                 case BINDER_WORK_DEAD_BINDER:
3895                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3896                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3897                         struct binder_ref_death *death;
3898                         uint32_t cmd;
3899                         binder_uintptr_t cookie;
3900
3901                         death = container_of(w, struct binder_ref_death, work);
3902                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3903                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3904                         else
3905                                 cmd = BR_DEAD_BINDER;
3906                         cookie = death->cookie;
3907
3908                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3909                                      "%d:%d %s %016llx\n",
3910                                       proc->pid, thread->pid,
3911                                       cmd == BR_DEAD_BINDER ?
3912                                       "BR_DEAD_BINDER" :
3913                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3914                                       (u64)cookie);
3915                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3916                                 binder_inner_proc_unlock(proc);
3917                                 kfree(death);
3918                                 binder_stats_deleted(BINDER_STAT_DEATH);
3919                         } else {
3920                                 binder_enqueue_work_ilocked(
3921                                                 w, &proc->delivered_death);
3922                                 binder_inner_proc_unlock(proc);
3923                         }
3924                         if (put_user(cmd, (uint32_t __user *)ptr))
3925                                 return -EFAULT;
3926                         ptr += sizeof(uint32_t);
3927                         if (put_user(cookie,
3928                                      (binder_uintptr_t __user *)ptr))
3929                                 return -EFAULT;
3930                         ptr += sizeof(binder_uintptr_t);
3931                         binder_stat_br(proc, thread, cmd);
3932                         if (cmd == BR_DEAD_BINDER)
3933                                 goto done; /* DEAD_BINDER notifications can cause transactions */
3934                 } break;
3935                 }
3936
3937                 if (!t)
3938                         continue;
3939
3940                 BUG_ON(t->buffer == NULL);
3941                 if (t->buffer->target_node) {
3942                         struct binder_node *target_node = t->buffer->target_node;
3943
3944                         tr.target.ptr = target_node->ptr;
3945                         tr.cookie = target_node->cookie;
3946                         t->saved_priority = task_nice(current);
3947                         if (t->priority < target_node->min_priority &&
3948                             !(t->flags & TF_ONE_WAY))
3949                                 binder_set_nice(t->priority);
3950                         else if (!(t->flags & TF_ONE_WAY) ||
3951                                  t->saved_priority > target_node->min_priority)
3952                                 binder_set_nice(target_node->min_priority);
3953                         cmd = BR_TRANSACTION;
3954                 } else {
3955                         tr.target.ptr = 0;
3956                         tr.cookie = 0;
3957                         cmd = BR_REPLY;
3958                 }
3959                 tr.code = t->code;
3960                 tr.flags = t->flags;
3961                 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
3962
3963                 t_from = binder_get_txn_from(t);
3964                 if (t_from) {
3965                         struct task_struct *sender = t_from->proc->tsk;
3966
3967                         tr.sender_pid = task_tgid_nr_ns(sender,
3968                                                         task_active_pid_ns(current));
3969                 } else {
3970                         tr.sender_pid = 0;
3971                 }
3972
3973                 tr.data_size = t->buffer->data_size;
3974                 tr.offsets_size = t->buffer->offsets_size;
3975                 tr.data.ptr.buffer = (binder_uintptr_t)
3976                         ((uintptr_t)t->buffer->data +
3977                         binder_alloc_get_user_buffer_offset(&proc->alloc));
3978                 tr.data.ptr.offsets = tr.data.ptr.buffer +
3979                                         ALIGN(t->buffer->data_size,
3980                                             sizeof(void *));
3981
3982                 if (put_user(cmd, (uint32_t __user *)ptr)) {
3983                         if (t_from)
3984                                 binder_thread_dec_tmpref(t_from);
3985                         return -EFAULT;
3986                 }
3987                 ptr += sizeof(uint32_t);
3988                 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3989                         if (t_from)
3990                                 binder_thread_dec_tmpref(t_from);
3991                         return -EFAULT;
3992                 }
3993                 ptr += sizeof(tr);
3994
3995                 trace_binder_transaction_received(t);
3996                 binder_stat_br(proc, thread, cmd);
3997                 binder_debug(BINDER_DEBUG_TRANSACTION,
3998                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
3999                              proc->pid, thread->pid,
4000                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4001                              "BR_REPLY",
4002                              t->debug_id, t_from ? t_from->proc->pid : 0,
4003                              t_from ? t_from->pid : 0, cmd,
4004                              t->buffer->data_size, t->buffer->offsets_size,
4005                              (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4006
4007                 if (t_from)
4008                         binder_thread_dec_tmpref(t_from);
4009                 t->buffer->allow_user_free = 1;
4010                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4011                         binder_inner_proc_lock(thread->proc);
4012                         t->to_parent = thread->transaction_stack;
4013                         t->to_thread = thread;
4014                         thread->transaction_stack = t;
4015                         binder_inner_proc_unlock(thread->proc);
4016                 } else {
4017                         binder_free_transaction(t);
4018                 }
4019                 break;
4020         }
4021
4022 done:
4023
4024         *consumed = ptr - buffer;
4025         binder_inner_proc_lock(proc);
4026         if (proc->requested_threads == 0 &&
4027             list_empty(&thread->proc->waiting_threads) &&
4028             proc->requested_threads_started < proc->max_threads &&
4029             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4030              BINDER_LOOPER_STATE_ENTERED))
4031             /* user-space fails to spawn a new thread if this check is omitted */) {
4032                 proc->requested_threads++;
4033                 binder_inner_proc_unlock(proc);
4034                 binder_debug(BINDER_DEBUG_THREADS,
4035                              "%d:%d BR_SPAWN_LOOPER\n",
4036                              proc->pid, thread->pid);
4037                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4038                         return -EFAULT;
4039                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4040         } else
4041                 binder_inner_proc_unlock(proc);
4042         return 0;
4043 }
4044
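/**
 * binder_release_work() - drain a work list that can no longer be read
 * @proc: binder_proc that owns @list
 * @list: list of binder_work items to flush
 *
 * Undelivered transactions that still expect a reply are answered
 * with BR_DEAD_REPLY; all other work items are logged and freed.
 */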
4045 static void binder_release_work(struct binder_proc *proc,
4046                                 struct list_head *list)
4047 {
4048         struct binder_work *w;
4049
4050         while (1) {
4051                 w = binder_dequeue_work_head(proc, list);
4052                 if (!w)
4053                         return;
4054
4055                 switch (w->type) {
4056                 case BINDER_WORK_TRANSACTION: {
4057                         struct binder_transaction *t;
4058
4059                         t = container_of(w, struct binder_transaction, work);
4060                         if (t->buffer->target_node &&
4061                             !(t->flags & TF_ONE_WAY)) {
4062                                 binder_send_failed_reply(t, BR_DEAD_REPLY);
4063                         } else {
4064                                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4065                                         "undelivered transaction %d\n",
4066                                         t->debug_id);
4067                                 binder_free_transaction(t);
4068                         }
4069                 } break;
4070                 case BINDER_WORK_RETURN_ERROR: {
4071                         struct binder_error *e = container_of(
4072                                         w, struct binder_error, work);
4073
4074                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4075                                 "undelivered TRANSACTION_ERROR: %u\n",
4076                                 e->cmd);
4077                 } break;
4078                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4079                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4080                                 "undelivered TRANSACTION_COMPLETE\n");
4081                         kfree(w);
4082                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4083                 } break;
4084                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4085                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4086                         struct binder_ref_death *death;
4087
4088                         death = container_of(w, struct binder_ref_death, work);
4089                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4090                                 "undelivered death notification, %016llx\n",
4091                                 (u64)death->cookie);
4092                         kfree(death);
4093                         binder_stats_deleted(BINDER_STAT_DEATH);
4094                 } break;
4095                 default:
4096                         pr_err("unexpected work type, %d, not freed\n",
4097                                w->type);
4098                         break;
4099                 }
4100         }
4101
4102 }
4103
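/**
 * binder_get_thread_ilocked() - find or insert the current thread
 * @proc:       binder_proc whose thread tree is searched
 * @new_thread: preallocated binder_thread to insert on a miss, or NULL
 *
 * Searches proc->threads, an rbtree keyed by pid, for current->pid.
 * On a miss, @new_thread (if supplied) is initialized and inserted.
 * The caller must hold proc->inner_lock.
 */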
4104 static struct binder_thread *binder_get_thread_ilocked(
4105                 struct binder_proc *proc, struct binder_thread *new_thread)
4106 {
4107         struct binder_thread *thread = NULL;
4108         struct rb_node *parent = NULL;
4109         struct rb_node **p = &proc->threads.rb_node;
4110
4111         while (*p) {
4112                 parent = *p;
4113                 thread = rb_entry(parent, struct binder_thread, rb_node);
4114
4115                 if (current->pid < thread->pid)
4116                         p = &(*p)->rb_left;
4117                 else if (current->pid > thread->pid)
4118                         p = &(*p)->rb_right;
4119                 else
4120                         return thread;
4121         }
4122         if (!new_thread)
4123                 return NULL;
4124         thread = new_thread;
4125         binder_stats_created(BINDER_STAT_THREAD);
4126         thread->proc = proc;
4127         thread->pid = current->pid;
4128         atomic_set(&thread->tmp_ref, 0);
4129         init_waitqueue_head(&thread->wait);
4130         INIT_LIST_HEAD(&thread->todo);
4131         rb_link_node(&thread->rb_node, parent, p);
4132         rb_insert_color(&thread->rb_node, &proc->threads);
4133         thread->looper_need_return = true;
4134         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4135         thread->return_error.cmd = BR_OK;
4136         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4137         thread->reply_error.cmd = BR_OK;
4138         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4139         return thread;
4140 }
4141
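/**
 * binder_get_thread() - return the binder_thread for current
 * @proc: binder_proc to look in
 *
 * The thread struct is allocated outside the inner lock and the
 * lookup is then retried under the lock; if another task raced in
 * and inserted an entry first, the spare allocation is freed.
 */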
4142 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4143 {
4144         struct binder_thread *thread;
4145         struct binder_thread *new_thread;
4146
4147         binder_inner_proc_lock(proc);
4148         thread = binder_get_thread_ilocked(proc, NULL);
4149         binder_inner_proc_unlock(proc);
4150         if (!thread) {
4151                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4152                 if (new_thread == NULL)
4153                         return NULL;
4154                 binder_inner_proc_lock(proc);
4155                 thread = binder_get_thread_ilocked(proc, new_thread);
4156                 binder_inner_proc_unlock(proc);
4157                 if (thread != new_thread)
4158                         kfree(new_thread);
4159         }
4160         return thread;
4161 }
4162
4163 static void binder_free_proc(struct binder_proc *proc)
4164 {
4165         BUG_ON(!list_empty(&proc->todo));
4166         BUG_ON(!list_empty(&proc->delivered_death));
4167         binder_alloc_deferred_release(&proc->alloc);
4168         put_task_struct(proc->tsk);
4169         binder_stats_deleted(BINDER_STAT_PROC);
4170         kfree(proc);
4171 }
4172
4173 static void binder_free_thread(struct binder_thread *thread)
4174 {
4175         BUG_ON(!list_empty(&thread->todo));
4176         binder_stats_deleted(BINDER_STAT_THREAD);
4177         binder_proc_dec_tmpref(thread->proc);
4178         kfree(thread);
4179 }
4180
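/**
 * binder_thread_release() - tear down an exiting thread
 * @proc:   binder_proc that owns @thread
 * @thread: thread being released
 *
 * Removes @thread from proc->threads and walks its transaction
 * stack, severing every link back to the dead thread. A transaction
 * that was waiting on a reply from @thread gets BR_DEAD_REPLY.
 *
 * Return: the number of transactions that were still active.
 */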
4181 static int binder_thread_release(struct binder_proc *proc,
4182                                  struct binder_thread *thread)
4183 {
4184         struct binder_transaction *t;
4185         struct binder_transaction *send_reply = NULL;
4186         int active_transactions = 0;
4187         struct binder_transaction *last_t = NULL;
4188
4189         binder_inner_proc_lock(thread->proc);
4190         /*
4191          * take a ref on the proc so it survives
4192          * after we remove this thread from proc->threads.
4193          * The corresponding decrement happens when we
4194          * actually free the thread in binder_free_thread().
4195          */
4196         proc->tmp_ref++;
4197         /*
4198          * take a ref on this thread to ensure it
4199          * survives while we are releasing it
4200          */
4201         atomic_inc(&thread->tmp_ref);
4202         rb_erase(&thread->rb_node, &proc->threads);
4203         t = thread->transaction_stack;
4204         if (t) {
4205                 spin_lock(&t->lock);
4206                 if (t->to_thread == thread)
4207                         send_reply = t;
4208         }
4209         thread->is_dead = true;
4210
4211         while (t) {
4212                 last_t = t;
4213                 active_transactions++;
4214                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4215                              "release %d:%d transaction %d %s, still active\n",
4216                               proc->pid, thread->pid,
4217                              t->debug_id,
4218                              (t->to_thread == thread) ? "in" : "out");
4219
4220                 if (t->to_thread == thread) {
4221                         t->to_proc = NULL;
4222                         t->to_thread = NULL;
4223                         if (t->buffer) {
4224                                 t->buffer->transaction = NULL;
4225                                 t->buffer = NULL;
4226                         }
4227                         t = t->to_parent;
4228                 } else if (t->from == thread) {
4229                         t->from = NULL;
4230                         t = t->from_parent;
4231                 } else
4232                         BUG();
4233                 spin_unlock(&last_t->lock);
4234                 if (t)
4235                         spin_lock(&t->lock);
4236         }
4237         binder_inner_proc_unlock(thread->proc);
4238
4239         if (send_reply)
4240                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4241         binder_release_work(proc, &thread->todo);
4242         binder_thread_dec_tmpref(thread);
4243         return active_transactions;
4244 }
4245
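/**
 * binder_poll() - poll support for a binder fd
 * @filp: file of the polling binder_proc
 * @wait: poll table to register with
 *
 * Reports POLLIN immediately if the calling thread already has work
 * pending; otherwise registers thread->wait with the poll table and
 * re-checks thread-local work before returning.
 */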
4246 static unsigned int binder_poll(struct file *filp,
4247                                 struct poll_table_struct *wait)
4248 {
4249         struct binder_proc *proc = filp->private_data;
4250         struct binder_thread *thread = NULL;
4251         bool wait_for_proc_work;
4252
4253         thread = binder_get_thread(proc);
        /* binder_get_thread() returns NULL if thread allocation fails */
        if (!thread)
                return POLLERR;
4254
4255         binder_inner_proc_lock(thread->proc);
4256         thread->looper |= BINDER_LOOPER_STATE_POLL;
4257         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4258
4259         binder_inner_proc_unlock(thread->proc);
4260
4261         if (binder_has_work(thread, wait_for_proc_work))
4262                 return POLLIN;
4263
4264         poll_wait(filp, &thread->wait, wait);
4265
4266         if (binder_has_thread_work(thread))
4267                 return POLLIN;
4268
4269         return 0;
4270 }
4271
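/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:   file of the calling binder_proc
 * @cmd:    ioctl command (checked only for its argument size)
 * @arg:    userspace pointer to a struct binder_write_read
 * @thread: calling binder_thread
 *
 * The write buffer is always processed before the read buffer, and
 * the consumed counts are copied back to userspace even on failure
 * so the caller can see how far processing got.
 *
 * Illustrative userspace call (names are examples, not part of this
 * file):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size   = write_len,
 *		.read_buffer  = (binder_uintptr_t)read_buf,
 *		.read_size    = sizeof(read_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */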
4272 static int binder_ioctl_write_read(struct file *filp,
4273                                 unsigned int cmd, unsigned long arg,
4274                                 struct binder_thread *thread)
4275 {
4276         int ret = 0;
4277         struct binder_proc *proc = filp->private_data;
4278         unsigned int size = _IOC_SIZE(cmd);
4279         void __user *ubuf = (void __user *)arg;
4280         struct binder_write_read bwr;
4281
4282         if (size != sizeof(struct binder_write_read)) {
4283                 ret = -EINVAL;
4284                 goto out;
4285         }
4286         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4287                 ret = -EFAULT;
4288                 goto out;
4289         }
4290         binder_debug(BINDER_DEBUG_READ_WRITE,
4291                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4292                      proc->pid, thread->pid,
4293                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4294                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4295
4296         if (bwr.write_size > 0) {
4297                 ret = binder_thread_write(proc, thread,
4298                                           bwr.write_buffer,
4299                                           bwr.write_size,
4300                                           &bwr.write_consumed);
4301                 trace_binder_write_done(ret);
4302                 if (ret < 0) {
4303                         bwr.read_consumed = 0;
4304                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4305                                 ret = -EFAULT;
4306                         goto out;
4307                 }
4308         }
4309         if (bwr.read_size > 0) {
4310                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4311                                          bwr.read_size,
4312                                          &bwr.read_consumed,
4313                                          filp->f_flags & O_NONBLOCK);
4314                 trace_binder_read_done(ret);
4315                 binder_inner_proc_lock(proc);
4316                 if (!binder_worklist_empty_ilocked(&proc->todo))
4317                         binder_wakeup_proc_ilocked(proc);
4318                 binder_inner_proc_unlock(proc);
4319                 if (ret < 0) {
4320                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4321                                 ret = -EFAULT;
4322                         goto out;
4323                 }
4324         }
4325         binder_debug(BINDER_DEBUG_READ_WRITE,
4326                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4327                      proc->pid, thread->pid,
4328                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4329                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4330         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4331                 ret = -EFAULT;
4332                 goto out;
4333         }
4334 out:
4335         return ret;
4336 }
4337
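/**
 * binder_ioctl_set_ctx_mgr() - register the calling proc as context manager
 * @filp: file of the binder_proc becoming context manager
 *
 * Only one context manager may exist per binder context; the caller
 * must pass security_binder_set_context_mgr(), and when a manager uid
 * is already configured the caller's euid has to match it. The new
 * manager node starts with one strong and one weak reference.
 */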
4338 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4339 {
4340         int ret = 0;
4341         struct binder_proc *proc = filp->private_data;
4342         struct binder_context *context = proc->context;
4343         struct binder_node *new_node;
4344         kuid_t curr_euid = current_euid();
4345
4346         mutex_lock(&context->context_mgr_node_lock);
4347         if (context->binder_context_mgr_node) {
4348                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4349                 ret = -EBUSY;
4350                 goto out;
4351         }
4352         ret = security_binder_set_context_mgr(proc->tsk);
4353         if (ret < 0)
4354                 goto out;
4355         if (uid_valid(context->binder_context_mgr_uid)) {
4356                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4357                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4358                                from_kuid(&init_user_ns, curr_euid),
4359                                from_kuid(&init_user_ns,
4360                                          context->binder_context_mgr_uid));
4361                         ret = -EPERM;
4362                         goto out;
4363                 }
4364         } else {
4365                 context->binder_context_mgr_uid = curr_euid;
4366         }
4367         new_node = binder_new_node(proc, NULL);
4368         if (!new_node) {
4369                 ret = -ENOMEM;
4370                 goto out;
4371         }
4372         binder_node_lock(new_node);
4373         new_node->local_weak_refs++;
4374         new_node->local_strong_refs++;
4375         new_node->has_strong_ref = 1;
4376         new_node->has_weak_ref = 1;
4377         context->binder_context_mgr_node = new_node;
4378         binder_node_unlock(new_node);
4379         binder_put_node(new_node);
4380 out:
4381         mutex_unlock(&context->context_mgr_node_lock);
4382         return ret;
4383 }
4384
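/**
 * binder_ioctl_get_node_debug_info() - report the next node after a cursor
 * @proc: binder_proc whose nodes are examined
 * @info: cursor; info->ptr on entry is the previous node's ptr
 *
 * Fills @info with the first node whose ptr is strictly greater than
 * the one passed in, so userspace can walk all nodes by repeating the
 * call with the returned ptr. A zeroed result marks the end.
 */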
4385 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4386                                 struct binder_node_debug_info *info)
4387 {
4388         struct rb_node *n;
4389         binder_uintptr_t ptr = info->ptr;
4390
4391         memset(info, 0, sizeof(*info));
4392
4393         binder_inner_proc_lock(proc);
4394         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4395                 struct binder_node *node = rb_entry(n, struct binder_node,
4396                                                     rb_node);
4397                 if (node->ptr > ptr) {
4398                         info->ptr = node->ptr;
4399                         info->cookie = node->cookie;
4400                         info->has_strong_ref = node->has_strong_ref;
4401                         info->has_weak_ref = node->has_weak_ref;
4402                         break;
4403                 }
4404         }
4405         binder_inner_proc_unlock(proc);
4406
4407         return 0;
4408 }
4409
4410 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4411 {
4412         int ret;
4413         struct binder_proc *proc = filp->private_data;
4414         struct binder_thread *thread;
4415         unsigned int size = _IOC_SIZE(cmd);
4416         void __user *ubuf = (void __user *)arg;
4417
4418         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4419                         proc->pid, current->pid, cmd, arg);*/
4420
4421         binder_selftest_alloc(&proc->alloc);
4422
4423         trace_binder_ioctl(cmd, arg);
4424
4425         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4426         if (ret)
4427                 goto err_unlocked;
4428
4429         thread = binder_get_thread(proc);
4430         if (thread == NULL) {
4431                 ret = -ENOMEM;
4432                 goto err;
4433         }
4434
4435         switch (cmd) {
4436         case BINDER_WRITE_READ:
4437                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4438                 if (ret)
4439                         goto err;
4440                 break;
4441         case BINDER_SET_MAX_THREADS: {
4442                 int max_threads;
4443
4444                 if (copy_from_user(&max_threads, ubuf,
4445                                    sizeof(max_threads))) {
4446                         ret = -EINVAL;
4447                         goto err;
4448                 }
4449                 binder_inner_proc_lock(proc);
4450                 proc->max_threads = max_threads;
4451                 binder_inner_proc_unlock(proc);
4452                 break;
4453         }
4454         case BINDER_SET_CONTEXT_MGR:
4455                 ret = binder_ioctl_set_ctx_mgr(filp);
4456                 if (ret)
4457                         goto err;
4458                 break;
4459         case BINDER_THREAD_EXIT:
4460                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4461                              proc->pid, thread->pid);
4462                 binder_thread_release(proc, thread);
4463                 thread = NULL;
4464                 break;
4465         case BINDER_VERSION: {
4466                 struct binder_version __user *ver = ubuf;
4467
4468                 if (size != sizeof(struct binder_version)) {
4469                         ret = -EINVAL;
4470                         goto err;
4471                 }
4472                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4473                              &ver->protocol_version)) {
4474                         ret = -EINVAL;
4475                         goto err;
4476                 }
4477                 break;
4478         }
4479         case BINDER_GET_NODE_DEBUG_INFO: {
4480                 struct binder_node_debug_info info;
4481
4482                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4483                         ret = -EFAULT;
4484                         goto err;
4485                 }
4486
4487                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4488                 if (ret < 0)
4489                         goto err;
4490
4491                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4492                         ret = -EFAULT;
4493                         goto err;
4494                 }
4495                 break;
4496         }
4497         default:
4498                 ret = -EINVAL;
4499                 goto err;
4500         }
4501         ret = 0;
4502 err:
4503         if (thread)
4504                 thread->looper_need_return = false;
4505         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4506         if (ret && ret != -ERESTARTSYS)
4507                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4508 err_unlocked:
4509         trace_binder_ioctl_done(ret);
4510         return ret;
4511 }
4512
4513 static void binder_vma_open(struct vm_area_struct *vma)
4514 {
4515         struct binder_proc *proc = vma->vm_private_data;
4516
4517         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4518                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4519                      proc->pid, vma->vm_start, vma->vm_end,
4520                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4521                      (unsigned long)pgprot_val(vma->vm_page_prot));
4522 }
4523
4524 static void binder_vma_close(struct vm_area_struct *vma)
4525 {
4526         struct binder_proc *proc = vma->vm_private_data;
4527
4528         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4529                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4530                      proc->pid, vma->vm_start, vma->vm_end,
4531                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4532                      (unsigned long)pgprot_val(vma->vm_page_prot));
4533         binder_alloc_vma_close(&proc->alloc);
4534         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4535 }
4536
4537 static int binder_vm_fault(struct vm_fault *vmf)
4538 {
4539         return VM_FAULT_SIGBUS;
4540 }
4541
4542 static const struct vm_operations_struct binder_vm_ops = {
4543         .open = binder_vma_open,
4544         .close = binder_vma_close,
4545         .fault = binder_vm_fault,
4546 };
4547
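/**
 * binder_mmap() - map the binder buffer area into userspace
 * @filp: file of the binder_proc doing the mmap
 * @vma:  userspace vma; its size is capped at 4MB here
 *
 * Mappings with flags in FORBIDDEN_MMAP_FLAGS are rejected and
 * VM_MAYWRITE is cleared (the kernel side fills the buffers), and
 * the vma is marked VM_DONTCOPY so children do not inherit it
 * across fork.
 */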
4548 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4549 {
4550         int ret;
4551         struct binder_proc *proc = filp->private_data;
4552         const char *failure_string;
4553
4554         if (proc->tsk != current->group_leader)
4555                 return -EINVAL;
4556
4557         if ((vma->vm_end - vma->vm_start) > SZ_4M)
4558                 vma->vm_end = vma->vm_start + SZ_4M;
4559
4560         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4561                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4562                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4563                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4564                      (unsigned long)pgprot_val(vma->vm_page_prot));
4565
4566         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4567                 ret = -EPERM;
4568                 failure_string = "bad vm_flags";
4569                 goto err_bad_arg;
4570         }
4571         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4572         vma->vm_ops = &binder_vm_ops;
4573         vma->vm_private_data = proc;
4574
4575         ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4576         if (ret)
4577                 return ret;
4578         proc->files = get_files_struct(current);
4579         return 0;
4580
4581 err_bad_arg:
4582         pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4583                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4584         return ret;
4585 }
4586
4587 static int binder_open(struct inode *nodp, struct file *filp)
4588 {
4589         struct binder_proc *proc;
4590         struct binder_device *binder_dev;
4591
4592         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4593                      current->group_leader->pid, current->pid);
4594
4595         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4596         if (proc == NULL)
4597                 return -ENOMEM;
4598         spin_lock_init(&proc->inner_lock);
4599         spin_lock_init(&proc->outer_lock);
4600         get_task_struct(current->group_leader);
4601         proc->tsk = current->group_leader;
4602         INIT_LIST_HEAD(&proc->todo);
4603         proc->default_priority = task_nice(current);
4604         binder_dev = container_of(filp->private_data, struct binder_device,
4605                                   miscdev);
4606         proc->context = &binder_dev->context;
4607         binder_alloc_init(&proc->alloc);
4608
4609         binder_stats_created(BINDER_STAT_PROC);
4610         proc->pid = current->group_leader->pid;
4611         INIT_LIST_HEAD(&proc->delivered_death);
4612         INIT_LIST_HEAD(&proc->waiting_threads);
4613         filp->private_data = proc;
4614
4615         mutex_lock(&binder_procs_lock);
4616         hlist_add_head(&proc->proc_node, &binder_procs);
4617         mutex_unlock(&binder_procs_lock);
4618
4619         if (binder_debugfs_dir_entry_proc) {
4620                 char strbuf[11];
4621
4622                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4623                 /*
4624                  * proc debug entries are shared between contexts, so
4625                  * this will fail if the process tries to open the driver
4626                  * again with a different context. The printing code will
4627                  * print all contexts that a given PID has anyway, so this
4628                  * is not a problem.
4629                  */
4630                 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4631                         binder_debugfs_dir_entry_proc,
4632                         (void *)(unsigned long)proc->pid,
4633                         &binder_proc_fops);
4634         }
4635
4636         return 0;
4637 }
4638
4639 static int binder_flush(struct file *filp, fl_owner_t id)
4640 {
4641         struct binder_proc *proc = filp->private_data;
4642
4643         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4644
4645         return 0;
4646 }
4647
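/**
 * binder_deferred_flush() - force blocked readers back to userspace
 * @proc: binder_proc being flushed
 *
 * Sets looper_need_return on every thread of @proc and wakes any
 * thread currently waiting so pending reads return promptly.
 */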
4648 static void binder_deferred_flush(struct binder_proc *proc)
4649 {
4650         struct rb_node *n;
4651         int wake_count = 0;
4652
4653         binder_inner_proc_lock(proc);
4654         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4655                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4656
4657                 thread->looper_need_return = true;
4658                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4659                         wake_up_interruptible(&thread->wait);
4660                         wake_count++;
4661                 }
4662         }
4663         binder_inner_proc_unlock(proc);
4664
4665         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4666                      "binder_flush: %d woke %d threads\n", proc->pid,
4667                      wake_count);
4668 }
4669
4670 static int binder_release(struct inode *nodp, struct file *filp)
4671 {
4672         struct binder_proc *proc = filp->private_data;
4673
4674         debugfs_remove(proc->debugfs_entry);
4675         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4676
4677         return 0;
4678 }
4679
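/**
 * binder_node_release() - release a node whose proc is going away
 * @node: node being released
 * @refs: running count of remote references seen so far
 *
 * A node with no remaining references is freed immediately. Otherwise
 * it is moved onto binder_dead_nodes and a BR_DEAD_BINDER work item is
 * queued for every ref that asked for a death notification.
 *
 * Return: @refs plus the number of refs found on @node.
 */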
4680 static int binder_node_release(struct binder_node *node, int refs)
4681 {
4682         struct binder_ref *ref;
4683         int death = 0;
4684         struct binder_proc *proc = node->proc;
4685
4686         binder_release_work(proc, &node->async_todo);
4687
4688         binder_node_lock(node);
4689         binder_inner_proc_lock(proc);
4690         binder_dequeue_work_ilocked(&node->work);
4691         /*
4692          * The caller must have taken a temporary ref on the node,
4693          * The caller must have taken a temporary ref on the node.
4694         BUG_ON(!node->tmp_refs);
4695         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4696                 binder_inner_proc_unlock(proc);
4697                 binder_node_unlock(node);
4698                 binder_free_node(node);
4699
4700                 return refs;
4701         }
4702
4703         node->proc = NULL;
4704         node->local_strong_refs = 0;
4705         node->local_weak_refs = 0;
4706         binder_inner_proc_unlock(proc);
4707
4708         spin_lock(&binder_dead_nodes_lock);
4709         hlist_add_head(&node->dead_node, &binder_dead_nodes);
4710         spin_unlock(&binder_dead_nodes_lock);
4711
4712         hlist_for_each_entry(ref, &node->refs, node_entry) {
4713                 refs++;
4714                 /*
4715                  * Need the node lock to synchronize
4716                  * with new notification requests and the
4717                  * inner lock to synchronize with queued
4718                  * death notifications.
4719                  */
4720                 binder_inner_proc_lock(ref->proc);
4721                 if (!ref->death) {
4722                         binder_inner_proc_unlock(ref->proc);
4723                         continue;
4724                 }
4725
4726                 death++;
4727
4728                 BUG_ON(!list_empty(&ref->death->work.entry));
4729                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4730                 binder_enqueue_work_ilocked(&ref->death->work,
4731                                             &ref->proc->todo);
4732                 binder_wakeup_proc_ilocked(ref->proc);
4733                 binder_inner_proc_unlock(ref->proc);
4734         }
4735
4736         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4737                      "node %d now dead, refs %d, death %d\n",
4738                      node->debug_id, refs, death);
4739         binder_node_unlock(node);
4740         binder_put_node(node);
4741
4742         return refs;
4743 }
4744
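/**
 * binder_deferred_release() - final teardown of a binder_proc
 * @proc: binder_proc being destroyed
 *
 * Releases every thread, node, and ref belonging to @proc, clears the
 * context manager if this proc owned it, and flushes all outstanding
 * work. The proc itself is freed when the temporary reference taken
 * here is dropped via binder_proc_dec_tmpref().
 */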
4745 static void binder_deferred_release(struct binder_proc *proc)
4746 {
4747         struct binder_context *context = proc->context;
4748         struct rb_node *n;
4749         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4750
4751         BUG_ON(proc->files);
4752
4753         mutex_lock(&binder_procs_lock);
4754         hlist_del(&proc->proc_node);
4755         mutex_unlock(&binder_procs_lock);
4756
4757         mutex_lock(&context->context_mgr_node_lock);
4758         if (context->binder_context_mgr_node &&
4759             context->binder_context_mgr_node->proc == proc) {
4760                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4761                              "%s: %d context_mgr_node gone\n",
4762                              __func__, proc->pid);
4763                 context->binder_context_mgr_node = NULL;
4764         }
4765         mutex_unlock(&context->context_mgr_node_lock);
4766         binder_inner_proc_lock(proc);
4767         /*
4768          * Make sure proc stays alive after we
4769          * remove all the threads
4770          */
4771         proc->tmp_ref++;
4772
4773         proc->is_dead = true;
4774         threads = 0;
4775         active_transactions = 0;
4776         while ((n = rb_first(&proc->threads))) {
4777                 struct binder_thread *thread;
4778
4779                 thread = rb_entry(n, struct binder_thread, rb_node);
4780                 binder_inner_proc_unlock(proc);
4781                 threads++;
4782                 active_transactions += binder_thread_release(proc, thread);
4783                 binder_inner_proc_lock(proc);
4784         }
4785
4786         nodes = 0;
4787         incoming_refs = 0;
4788         while ((n = rb_first(&proc->nodes))) {
4789                 struct binder_node *node;
4790
4791                 node = rb_entry(n, struct binder_node, rb_node);
4792                 nodes++;
4793                 /*
4794                  * take a temporary ref on the node before
4795                  * calling binder_node_release() which will either
4796                  * kfree() the node or call binder_put_node()
4797                  */
4798                 binder_inc_node_tmpref_ilocked(node);
4799                 rb_erase(&node->rb_node, &proc->nodes);
4800                 binder_inner_proc_unlock(proc);
4801                 incoming_refs = binder_node_release(node, incoming_refs);
4802                 binder_inner_proc_lock(proc);
4803         }
4804         binder_inner_proc_unlock(proc);
4805
4806         outgoing_refs = 0;
4807         binder_proc_lock(proc);
4808         while ((n = rb_first(&proc->refs_by_desc))) {
4809                 struct binder_ref *ref;
4810
4811                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4812                 outgoing_refs++;
4813                 binder_cleanup_ref_olocked(ref);
4814                 binder_proc_unlock(proc);
4815                 binder_free_ref(ref);
4816                 binder_proc_lock(proc);
4817         }
4818         binder_proc_unlock(proc);
4819
4820         binder_release_work(proc, &proc->todo);
4821         binder_release_work(proc, &proc->delivered_death);
4822
4823         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4824                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4825                      __func__, proc->pid, threads, nodes, incoming_refs,
4826                      outgoing_refs, active_transactions);
4827
4828         binder_proc_dec_tmpref(proc);
4829 }
4830
4831 static void binder_deferred_func(struct work_struct *work)
4832 {
4833         struct binder_proc *proc;
4834         struct files_struct *files;
4835
4836         int defer;
4837
4838         do {
4839                 mutex_lock(&binder_deferred_lock);
4840                 if (!hlist_empty(&binder_deferred_list)) {
4841                         proc = hlist_entry(binder_deferred_list.first,
4842                                         struct binder_proc, deferred_work_node);
4843                         hlist_del_init(&proc->deferred_work_node);
4844                         defer = proc->deferred_work;
4845                         proc->deferred_work = 0;
4846                 } else {
4847                         proc = NULL;
4848                         defer = 0;
4849                 }
4850                 mutex_unlock(&binder_deferred_lock);
4851
4852                 files = NULL;
4853                 if (defer & BINDER_DEFERRED_PUT_FILES) {
4854                         files = proc->files;
4855                         if (files)
4856                                 proc->files = NULL;
4857                 }
4858
4859                 if (defer & BINDER_DEFERRED_FLUSH)
4860                         binder_deferred_flush(proc);
4861
4862                 if (defer & BINDER_DEFERRED_RELEASE)
4863                         binder_deferred_release(proc); /* frees proc */
4864
4865                 if (files)
4866                         put_files_struct(files);
4867         } while (proc);
4868 }

4869 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4870
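/**
 * binder_defer_work() - queue deferred work for a proc
 * @proc:  binder_proc the work applies to
 * @defer: BINDER_DEFERRED_* flags to add
 *
 * Flags accumulate in proc->deferred_work; the proc is put on
 * binder_deferred_list only if it is not already queued, and the
 * shared work item is scheduled to process the list.
 */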
4871 static void
4872 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4873 {
4874         mutex_lock(&binder_deferred_lock);
4875         proc->deferred_work |= defer;
4876         if (hlist_unhashed(&proc->deferred_work_node)) {
4877                 hlist_add_head(&proc->deferred_work_node,
4878                                 &binder_deferred_list);
4879                 schedule_work(&binder_deferred_work);
4880         }
4881         mutex_unlock(&binder_deferred_lock);
4882 }
4883
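/**
 * print_binder_transaction_ilocked() - dump one transaction to debugfs
 * @m:      seq_file to print into
 * @proc:   binder_proc whose inner lock the caller holds
 * @prefix: indentation prefix for the output line
 * @t:      transaction to print
 *
 * The transaction buffer is dereferenced only when @proc is the
 * transaction's target proc, since only then does the held inner
 * lock protect the buffer.
 */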
4884 static void print_binder_transaction_ilocked(struct seq_file *m,
4885                                              struct binder_proc *proc,
4886                                              const char *prefix,
4887                                              struct binder_transaction *t)
4888 {
4889         struct binder_proc *to_proc;
4890         struct binder_buffer *buffer = t->buffer;
4891
4892         spin_lock(&t->lock);
4893         to_proc = t->to_proc;
4894         seq_printf(m,
4895                    "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4896                    prefix, t->debug_id, t,
4897                    t->from ? t->from->proc->pid : 0,
4898                    t->from ? t->from->pid : 0,
4899                    to_proc ? to_proc->pid : 0,
4900                    t->to_thread ? t->to_thread->pid : 0,
4901                    t->code, t->flags, t->priority, t->need_reply);
4902         spin_unlock(&t->lock);
4903
4904         if (proc != to_proc) {
4905                 /*
4906                  * Can only safely deref buffer if we are holding the
4907                  * inner lock of the transaction's target proc
4908                  */
4909                 seq_puts(m, "\n");
4910                 return;
4911         }
4912
4913         if (buffer == NULL) {
4914                 seq_puts(m, " buffer free\n");
4915                 return;
4916         }
4917         if (buffer->target_node)
4918                 seq_printf(m, " node %d", buffer->target_node->debug_id);
4919         seq_printf(m, " size %zd:%zd data %p\n",
4920                    buffer->data_size, buffer->offsets_size,
4921                    buffer->data);
4922 }
4923
4924 static void print_binder_work_ilocked(struct seq_file *m,
4925                                      struct binder_proc *proc,
4926                                      const char *prefix,
4927                                      const char *transaction_prefix,
4928                                      struct binder_work *w)
4929 {
4930         struct binder_node *node;
4931         struct binder_transaction *t;
4932
4933         switch (w->type) {
4934         case BINDER_WORK_TRANSACTION:
4935                 t = container_of(w, struct binder_transaction, work);
4936                 print_binder_transaction_ilocked(
4937                                 m, proc, transaction_prefix, t);
4938                 break;
4939         case BINDER_WORK_RETURN_ERROR: {
4940                 struct binder_error *e = container_of(
4941                                 w, struct binder_error, work);
4942
4943                 seq_printf(m, "%stransaction error: %u\n",
4944                            prefix, e->cmd);
4945         } break;
4946         case BINDER_WORK_TRANSACTION_COMPLETE:
4947                 seq_printf(m, "%stransaction complete\n", prefix);
4948                 break;
4949         case BINDER_WORK_NODE:
4950                 node = container_of(w, struct binder_node, work);
4951                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4952                            prefix, node->debug_id,
4953                            (u64)node->ptr, (u64)node->cookie);
4954                 break;
4955         case BINDER_WORK_DEAD_BINDER:
4956                 seq_printf(m, "%shas dead binder\n", prefix);
4957                 break;
4958         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4959                 seq_printf(m, "%shas cleared dead binder\n", prefix);
4960                 break;
4961         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
4962                 seq_printf(m, "%shas cleared death notification\n", prefix);
4963                 break;
4964         default:
4965                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
4966                 break;
4967         }
4968 }
4969
4970 static void print_binder_thread_ilocked(struct seq_file *m,
4971                                         struct binder_thread *thread,
4972                                         int print_always)
4973 {
4974         struct binder_transaction *t;
4975         struct binder_work *w;
4976         size_t start_pos = m->count;
4977         size_t header_pos;
4978
4979         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
4980                         thread->pid, thread->looper,
4981                         thread->looper_need_return,
4982                         atomic_read(&thread->tmp_ref));
4983         header_pos = m->count;
4984         t = thread->transaction_stack;
4985         while (t) {
4986                 if (t->from == thread) {
4987                         print_binder_transaction_ilocked(m, thread->proc,
4988                                         "    outgoing transaction", t);
4989                         t = t->from_parent;
4990                 } else if (t->to_thread == thread) {
4991                         print_binder_transaction_ilocked(m, thread->proc,
4992                                                  "    incoming transaction", t);
4993                         t = t->to_parent;
4994                 } else {
4995                         print_binder_transaction_ilocked(m, thread->proc,
4996                                         "    bad transaction", t);
4997                         t = NULL;
4998                 }
4999         }
5000         list_for_each_entry(w, &thread->todo, entry) {
5001                 print_binder_work_ilocked(m, thread->proc, "    ",
5002                                           "    pending transaction", w);
5003         }
5004         if (!print_always && m->count == header_pos)
5005                 m->count = start_pos;
5006 }
5007
static void print_binder_node_nilocked(struct seq_file *m,
                                       struct binder_node *node)
{
        struct binder_ref *ref;
        struct binder_work *w;
        int count;

        count = 0;
        hlist_for_each_entry(ref, &node->refs, node_entry)
                count++;

        seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
                   node->debug_id, (u64)node->ptr, (u64)node->cookie,
                   node->has_strong_ref, node->has_weak_ref,
                   node->local_strong_refs, node->local_weak_refs,
                   node->internal_strong_refs, count, node->tmp_refs);
        if (count) {
                seq_puts(m, " proc");
                hlist_for_each_entry(ref, &node->refs, node_entry)
                        seq_printf(m, " %d", ref->proc->pid);
        }
        seq_puts(m, "\n");
        if (node->proc) {
                list_for_each_entry(w, &node->async_todo, entry)
                        print_binder_work_ilocked(m, node->proc, "    ",
                                          "    pending async transaction", w);
        }
}

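/*
 * Caller holds the owning proc's outer lock; the ref's node lock is
 * taken here only for the duration of the print, since the node may
 * live in (or have died in) another process.
 */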
static void print_binder_ref_olocked(struct seq_file *m,
                                     struct binder_ref *ref)
{
        binder_node_lock(ref->node);
        seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
                   ref->data.debug_id, ref->data.desc,
                   ref->node->proc ? "" : "dead ",
                   ref->node->debug_id, ref->data.strong,
                   ref->data.weak, ref->death);
        binder_node_unlock(ref->node);
}

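/*
 * Dump one process: threads, nodes, refs, allocator state and pending
 * work.  node->lock must be acquired before proc->inner_lock, so the
 * node loop below cannot print a node while still holding the inner
 * lock; instead each node is pinned with a temporary reference, the
 * inner lock is dropped and re-taken, and the previous node's
 * reference is released one iteration later so the rb-tree walk stays
 * valid throughout.
 */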
static void print_binder_proc(struct seq_file *m,
                              struct binder_proc *proc, int print_all)
{
        struct binder_work *w;
        struct rb_node *n;
        size_t start_pos = m->count;
        size_t header_pos;
        struct binder_node *last_node = NULL;

        seq_printf(m, "proc %d\n", proc->pid);
        seq_printf(m, "context %s\n", proc->context->name);
        header_pos = m->count;

        binder_inner_proc_lock(proc);
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
                print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
                                                rb_node), print_all);

        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
                struct binder_node *node = rb_entry(n, struct binder_node,
                                                    rb_node);
                /*
                 * take a temporary reference on the node so it
                 * survives and isn't removed from the tree
                 * while we print it.
                 */
                binder_inc_node_tmpref_ilocked(node);
                /* Need to drop inner lock to take node lock */
                binder_inner_proc_unlock(proc);
                if (last_node)
                        binder_put_node(last_node);
                binder_node_inner_lock(node);
                print_binder_node_nilocked(m, node);
                binder_node_inner_unlock(node);
                last_node = node;
                binder_inner_proc_lock(proc);
        }
        binder_inner_proc_unlock(proc);
        if (last_node)
                binder_put_node(last_node);

        if (print_all) {
                binder_proc_lock(proc);
                for (n = rb_first(&proc->refs_by_desc);
                     n != NULL;
                     n = rb_next(n))
                        print_binder_ref_olocked(m, rb_entry(n,
                                                            struct binder_ref,
                                                            rb_node_desc));
                binder_proc_unlock(proc);
        }
        binder_alloc_print_allocated(m, &proc->alloc);
        binder_inner_proc_lock(proc);
        list_for_each_entry(w, &proc->todo, entry)
                print_binder_work_ilocked(m, proc, "  ",
                                          "  pending transaction", w);
        list_for_each_entry(w, &proc->delivered_death, entry) {
                seq_puts(m, "  has delivered dead binder\n");
                break;
        }
        binder_inner_proc_unlock(proc);
        if (!print_all && m->count == header_pos)
                m->count = start_pos;
}

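/*
 * Human-readable names for the BR_* return and BC_* command protocol
 * codes (from uapi/linux/android/binder.h) and for the object
 * statistics.  Each table must stay index-aligned with its
 * corresponding enum; the BUILD_BUG_ON()s in print_binder_stats()
 * catch size mismatches at compile time.
 */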
static const char * const binder_return_strings[] = {
        "BR_ERROR",
        "BR_OK",
        "BR_TRANSACTION",
        "BR_REPLY",
        "BR_ACQUIRE_RESULT",
        "BR_DEAD_REPLY",
        "BR_TRANSACTION_COMPLETE",
        "BR_INCREFS",
        "BR_ACQUIRE",
        "BR_RELEASE",
        "BR_DECREFS",
        "BR_ATTEMPT_ACQUIRE",
        "BR_NOOP",
        "BR_SPAWN_LOOPER",
        "BR_FINISHED",
        "BR_DEAD_BINDER",
        "BR_CLEAR_DEATH_NOTIFICATION_DONE",
        "BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
        "BC_TRANSACTION",
        "BC_REPLY",
        "BC_ACQUIRE_RESULT",
        "BC_FREE_BUFFER",
        "BC_INCREFS",
        "BC_ACQUIRE",
        "BC_RELEASE",
        "BC_DECREFS",
        "BC_INCREFS_DONE",
        "BC_ACQUIRE_DONE",
        "BC_ATTEMPT_ACQUIRE",
        "BC_REGISTER_LOOPER",
        "BC_ENTER_LOOPER",
        "BC_EXIT_LOOPER",
        "BC_REQUEST_DEATH_NOTIFICATION",
        "BC_CLEAR_DEATH_NOTIFICATION",
        "BC_DEAD_BINDER_DONE",
        "BC_TRANSACTION_SG",
        "BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
        "proc",
        "thread",
        "node",
        "ref",
        "death",
        "transaction",
        "transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
                               struct binder_stats *stats)
{
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
                     ARRAY_SIZE(binder_command_strings));
        for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
                int temp = atomic_read(&stats->bc[i]);

                if (temp)
                        seq_printf(m, "%s%s: %d\n", prefix,
                                   binder_command_strings[i], temp);
        }

        BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
                     ARRAY_SIZE(binder_return_strings));
        for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
                int temp = atomic_read(&stats->br[i]);

                if (temp)
                        seq_printf(m, "%s%s: %d\n", prefix,
                                   binder_return_strings[i], temp);
        }

        BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
                     ARRAY_SIZE(binder_objstat_strings));
        BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
                     ARRAY_SIZE(stats->obj_deleted));
        for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
                int created = atomic_read(&stats->obj_created[i]);
                int deleted = atomic_read(&stats->obj_deleted[i]);

                if (created || deleted)
                        seq_printf(m, "%s%s: active %d total %d\n",
                                prefix,
                                binder_objstat_strings[i],
                                created - deleted,
                                created);
        }
}

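/*
 * Illustrative output shape (values made up), following the format
 * strings above with a prefix of "  ":
 *
 *   BC_TRANSACTION: 120
 *   BR_TRANSACTION_COMPLETE: 120
 *   node: active 3 total 17
 *
 * where "active" is created minus deleted and "total" is created.
 */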
static void print_binder_proc_stats(struct seq_file *m,
                                    struct binder_proc *proc)
{
        struct binder_work *w;
        struct binder_thread *thread;
        struct rb_node *n;
        int count, strong, weak, ready_threads;
        size_t free_async_space =
                binder_alloc_get_free_async_space(&proc->alloc);

        seq_printf(m, "proc %d\n", proc->pid);
        seq_printf(m, "context %s\n", proc->context->name);
        count = 0;
        ready_threads = 0;
        binder_inner_proc_lock(proc);
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
                count++;

        list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
                ready_threads++;

        seq_printf(m, "  threads: %d\n", count);
        seq_printf(m, "  requested threads: %d+%d/%d\n"
                        "  ready threads %d\n"
                        "  free async space %zd\n", proc->requested_threads,
                        proc->requested_threads_started, proc->max_threads,
                        ready_threads,
                        free_async_space);
        count = 0;
        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
                count++;
        binder_inner_proc_unlock(proc);
        seq_printf(m, "  nodes: %d\n", count);
        count = 0;
        strong = 0;
        weak = 0;
        binder_proc_lock(proc);
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                struct binder_ref *ref = rb_entry(n, struct binder_ref,
                                                  rb_node_desc);
                count++;
                strong += ref->data.strong;
                weak += ref->data.weak;
        }
        binder_proc_unlock(proc);
        seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

        count = binder_alloc_get_allocated_count(&proc->alloc);
        seq_printf(m, "  buffers: %d\n", count);

        binder_alloc_print_pages(m, &proc->alloc);

        count = 0;
        binder_inner_proc_lock(proc);
        list_for_each_entry(w, &proc->todo, entry) {
                if (w->type == BINDER_WORK_TRANSACTION)
                        count++;
        }
        binder_inner_proc_unlock(proc);
        seq_printf(m, "  pending transactions: %d\n", count);

        print_binder_stats(m, "  ", &proc->stats);
}

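/*
 * Backs the debugfs "state" file (see binder_init()): dump every dead
 * node first, then full state for every binder_proc.  The same
 * pin-with-tmp_ref / drop-lock / print / release-previous pattern as
 * in print_binder_proc() keeps the dead-node list walk valid while
 * binder_dead_nodes_lock is dropped for printing.
 */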
static int binder_state_show(struct seq_file *m, void *unused)
{
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_node *last_node = NULL;

        seq_puts(m, "binder state:\n");

        spin_lock(&binder_dead_nodes_lock);
        if (!hlist_empty(&binder_dead_nodes))
                seq_puts(m, "dead nodes:\n");
        hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
                /*
                 * take a temporary reference on the node so it
                 * survives and isn't removed from the list
                 * while we print it.
                 */
                node->tmp_refs++;
                spin_unlock(&binder_dead_nodes_lock);
                if (last_node)
                        binder_put_node(last_node);
                binder_node_lock(node);
                print_binder_node_nilocked(m, node);
                binder_node_unlock(node);
                last_node = node;
                spin_lock(&binder_dead_nodes_lock);
        }
        spin_unlock(&binder_dead_nodes_lock);
        if (last_node)
                binder_put_node(last_node);

        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc(m, proc, 1);
        mutex_unlock(&binder_procs_lock);

        return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
        struct binder_proc *proc;

        seq_puts(m, "binder stats:\n");

        print_binder_stats(m, "", &binder_stats);

        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc_stats(m, proc);
        mutex_unlock(&binder_procs_lock);

        return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
        struct binder_proc *proc;

        seq_puts(m, "binder transactions:\n");
        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc(m, proc, 0);
        mutex_unlock(&binder_procs_lock);

        return 0;
}

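/*
 * Backs the per-process entries under the debugfs "proc" directory;
 * the pid of the process to dump is stashed in m->private when the
 * file is created at binder_open() time.
 */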
static int binder_proc_show(struct seq_file *m, void *unused)
{
        struct binder_proc *itr;
        int pid = (unsigned long)m->private;

        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(itr, &binder_procs, proc_node) {
                if (itr->pid == pid) {
                        seq_puts(m, "binder proc state:\n");
                        print_binder_proc(m, itr, 1);
                }
        }
        mutex_unlock(&binder_procs_lock);

        return 0;
}

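/*
 * The log writer fills in an entry and only then stores its debug_id
 * into debug_id_done behind an smp_wmb().  The paired smp_rmb()s here
 * let the reader detect a torn entry: if debug_id_done is zero, or
 * does not match when re-read after the fields have been printed, the
 * entry was being rewritten concurrently and is flagged
 * "(incomplete)".
 */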
static void print_binder_transaction_log_entry(struct seq_file *m,
                                        struct binder_transaction_log_entry *e)
{
        int debug_id = READ_ONCE(e->debug_id_done);
        /*
         * read barrier to ensure debug_id_done is read before
         * the log entry fields below
         */
        smp_rmb();
        seq_printf(m,
                   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
                   e->debug_id, (e->call_type == 2) ? "reply" :
                   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
                   e->from_thread, e->to_proc, e->to_thread, e->context_name,
                   e->to_node, e->target_handle, e->data_size, e->offsets_size,
                   e->return_error, e->return_error_param,
                   e->return_error_line);
        /*
         * read barrier to ensure debug_id_done is re-read only after
         * the entry fields above have been read
         */
        smp_rmb();
        seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
                        "\n" : " (incomplete)\n");
}

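/*
 * log->cur counts insertions monotonically (it is initialized to ~0U
 * in binder_init() so the first recorded entry lands in slot 0).
 * Until the ring has wrapped, print entries 0..cur in order; once
 * full, print every slot starting at the oldest.  Worked example with
 * a 32-entry ring and cur == 40: count becomes 32 and printing starts
 * at index 41 % 32 == 9, the oldest surviving slot, wrapping around
 * through index 8.
 */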
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
        struct binder_transaction_log *log = m->private;
        unsigned int log_cur = atomic_read(&log->cur);
        unsigned int count;
        unsigned int cur;
        int i;

        count = log_cur + 1;
        cur = count < ARRAY_SIZE(log->entry) && !log->full ?
                0 : count % ARRAY_SIZE(log->entry);
        if (count > ARRAY_SIZE(log->entry) || log->full)
                count = ARRAY_SIZE(log->entry);
        for (i = 0; i < count; i++) {
                unsigned int index = cur++ % ARRAY_SIZE(log->entry);

                print_binder_transaction_log_entry(m, &log->entry[index]);
        }
        return 0;
}

static const struct file_operations binder_fops = {
        .owner = THIS_MODULE,
        .poll = binder_poll,
        .unlocked_ioctl = binder_ioctl,
        .compat_ioctl = binder_ioctl,
        .mmap = binder_mmap,
        .open = binder_open,
        .flush = binder_flush,
        .release = binder_release,
};

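/* Instantiate the debugfs open() handlers and fops for each *_show above. */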
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
        int ret;
        struct binder_device *binder_device;

        binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
        if (!binder_device)
                return -ENOMEM;

        binder_device->miscdev.fops = &binder_fops;
        binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
        binder_device->miscdev.name = name;

        binder_device->context.binder_context_mgr_uid = INVALID_UID;
        binder_device->context.name = name;
        mutex_init(&binder_device->context.context_mgr_node_lock);

        ret = misc_register(&binder_device->miscdev);
        if (ret < 0) {
                kfree(binder_device);
                return ret;
        }

        hlist_add_head(&binder_device->hlist, &binder_devices);

        return ret;
}

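/*
 * binder_devices_param is a comma-separated list of device names
 * (typically "binder,hwbinder,vndbinder", taken from
 * CONFIG_ANDROID_BINDER_DEVICES); one misc device is registered per
 * name below.
 */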
static int __init binder_init(void)
{
        int ret;
        char *device_name, *device_names, *device_tmp;
        struct binder_device *device;
        struct hlist_node *tmp;

        binder_alloc_shrinker_init();

        atomic_set(&binder_transaction_log.cur, ~0U);
        atomic_set(&binder_transaction_log_failed.cur, ~0U);

        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
        if (binder_debugfs_dir_entry_root)
                binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                                 binder_debugfs_dir_entry_root);

        if (binder_debugfs_dir_entry_root) {
                debugfs_create_file("state",
                                    S_IRUGO,
                                    binder_debugfs_dir_entry_root,
                                    NULL,
                                    &binder_state_fops);
                debugfs_create_file("stats",
                                    S_IRUGO,
                                    binder_debugfs_dir_entry_root,
                                    NULL,
                                    &binder_stats_fops);
                debugfs_create_file("transactions",
                                    S_IRUGO,
                                    binder_debugfs_dir_entry_root,
                                    NULL,
                                    &binder_transactions_fops);
                debugfs_create_file("transaction_log",
                                    S_IRUGO,
                                    binder_debugfs_dir_entry_root,
                                    &binder_transaction_log,
                                    &binder_transaction_log_fops);
                debugfs_create_file("failed_transaction_log",
                                    S_IRUGO,
                                    binder_debugfs_dir_entry_root,
                                    &binder_transaction_log_failed,
                                    &binder_transaction_log_fops);
        }

        /*
         * Copy the module parameter string, because we don't want to
         * tokenize it in-place.
         */
        device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
        if (!device_names) {
                ret = -ENOMEM;
                goto err_alloc_device_names_failed;
        }
        strcpy(device_names, binder_devices_param);

        device_tmp = device_names;
        while ((device_name = strsep(&device_tmp, ","))) {
                ret = init_binder_device(device_name);
                if (ret)
                        goto err_init_binder_device_failed;
        }

        return ret;

err_init_binder_device_failed:
        hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
                misc_deregister(&device->miscdev);
                hlist_del(&device->hlist);
                kfree(device);
        }

        kfree(device_names);

err_alloc_device_names_failed:
        debugfs_remove_recursive(binder_debugfs_dir_entry_root);

        return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");