// SPDX-License-Identifier: GPL-2.0-only
/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
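/*
 * Illustrative sketch (not part of the driver): a caller that needs all
 * three locks takes them in the documented order. The proc/node variables
 * here are hypothetical; the lock helpers are defined later in this file.
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	// ... touch refs, node fields, and todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */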
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#define SZ_4M 0x400000
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};

static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
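/*
 * Illustrative usage (assumes the standard /sys/module parameter layout,
 * with this code built as "binder"): the parameters above can be read and,
 * where writable, tuned at runtime, e.g.:
 *
 *	echo 0x7 > /sys/module/binder/parameters/debug_mask
 *	cat /sys/module/binder/parameters/devices
 */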
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
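/*
 * Illustrative sketch (hypothetical helper, not part of the driver): all
 * flat object types share a leading binder_object_header, so parsers
 * switch on hdr->type and convert with the macros above, e.g.:
 *
 *	static binder_uintptr_t example_object_cookie(
 *				struct binder_object_header *hdr)
 *	{
 *		switch (hdr->type) {
 *		case BINDER_TYPE_BINDER:
 *		case BINDER_TYPE_WEAK_BINDER:
 *			return to_flat_binder_object(hdr)->cookie;
 *		case BINDER_TYPE_FD:
 *			return to_binder_fd_object(hdr)->cookie;
 *		default:
 *			return 0;
 *		}
 *	}
 */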
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
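/*
 * Illustrative sketch (assumption about the reader side, implied by the
 * write-barrier comment above): a consumer that prints log entries pairs
 * with smp_wmb() via a read barrier, e.g.:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... read the remaining fields; the entry is stable if
 *	// debug_id_done still matches the entry's debug_id ...
 */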
/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *        (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *           (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *             (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *        (invariant after initialized)
 * @refs: list of references on this node
 *        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs: weak user refs from local process
 *                   (protected by @proc->inner_lock if @proc
 *                   and by @lock)
 * @local_strong_refs: strong user refs from local process
 *                     (protected by @proc->inner_lock if @proc
 *                     and by @lock)
 * @tmp_refs: temporary kernel refs
 *            (protected by @proc->inner_lock while @proc
 *            is valid, and by binder_dead_nodes_lock
 *            if @proc is NULL. During inc/dec and node release
 *            it is also protected by @lock to provide safety
 *            as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *       (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *          (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *                  (protected by @proc->inner_lock if @proc
 *                  and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *                      (protected by @proc->inner_lock if @proc
 *                      and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *                (protected by @proc->inner_lock if @proc
 *                and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *                    (protected by @proc->inner_lock if @proc
 *                    and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                         (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *              (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *                (invariant after initialized)
 * @txn_security_ctx: require sender's security context
 *                    (invariant after initialized)
 * @async_todo: list of async work items
 *              (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	struct rb_node rb_node;
	struct hlist_node dead_node;
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	/*
	 * bitfield elements protected by
	 * proc inner_lock
	 */
	u8 has_strong_ref:1;
	u8 pending_strong_ref:1;
	u8 has_weak_ref:1;
	u8 pending_weak_ref:1;
	/*
	 * invariant after initialization
	 */
	u8 accept_fds:1;
	u8 txn_security_ctx:1;
	u8 min_priority;
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *              (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *         (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *           (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *         this proc ordered by node->ptr
 *         (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *                (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *                (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 *                   (protected by @inner_lock)
 * @pid: PID of group_leader of process
 *       (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *       (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *                      (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *                 (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @inner_lock)
 * @todo: list of work for this process
 *        (protected by @inner_lock)
 * @stats: per-process binder statistics
 *         (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *                   (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *               (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *                     yet started. In current implementation, can
 *                     only be 0 or 1.
 *                     (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                             (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *           (protected by @inner_lock)
 * @default_priority: default scheduler priority
 *                    (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *           (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *              Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *        (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *           (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 *                       (protected by @proc->inner_lock)
 * @pid: PID for this thread
 *       (invariant after initialization)
 * @looper: bitmap of looping state
 *          (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *                      (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *                     (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *        (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 *                (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *                (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *               (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *         (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *           (atomic since @proc->inner_lock cannot
 *           always be acquired)
 * @is_dead: thread is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *           (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * list.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int flags;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
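/*
 * Illustrative contrast (editorial note, not from the driver): the deferred
 * variant above differs from binder_enqueue_thread_work_ilocked() below only
 * in that it leaves @thread->process_todo untouched. The work items shown
 * here are hypothetical:
 *
 *	// makes the work visible to the thread's read loop:
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	// queues silently until other work sets process_todo:
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 */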
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);
	if (thread)
		list_del_init(&thread->waiting_thread_node);
	return thread;
}
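/*
 * Illustrative sketch (editorial, under the contract documented above): the
 * intended select-then-wake pattern, as also used by
 * binder_wakeup_proc_ilocked() further below:
 *
 *	struct binder_thread *thread;
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	// ... queue work for the proc or the selected thread ...
 *	binder_wakeup_thread_ilocked(proc, thread, false);
 *	binder_inner_proc_unlock(proc);
 */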
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}
	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
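/*
 * Illustrative sketch (hypothetical caller): binder_get_node() returns the
 * node with a temporary reference held, so every successful lookup must be
 * paired with binder_put_node(), defined further below:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... use node; it cannot be freed while tmp_refs is held ...
 *		binder_put_node(node);
 *	}
 */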
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to the @increment arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
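/*
 * Illustrative sketch (hypothetical caller): a ref bumped with
 * binder_inc_ref_for_node() below is typically dropped later by handle via
 * binder_dec_ref_for_handle(), using the returned binder_ref_data:
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	// target_list (e.g. a thread's todo list) receives node work
 *	ret = binder_inc_ref_for_node(proc, node, true, &thread->todo, &rdata);
 *	if (!ret) {
 *		// ... hand rdata.desc to userspace; to undo on failure: ...
 *		binder_dec_ref_for_handle(proc, rdata.desc, true, &rdata);
 *	}
 */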
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
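/*
 * Illustrative sketch (hypothetical caller): binder_get_txn_from() further
 * below takes a tmp_ref on the returned thread, which must be released with
 * binder_thread_dec_tmpref() once the caller is done with it:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		// ... safe to dereference from; it cannot be freed here ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */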
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction to clean up
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc: binder_proc owning the buffer
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the @buffer at which to validate an object.
 * @object: struct binder_object to read into
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *         size of that object. Otherwise, it returns zero. The object
 *         is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;
	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
				      offset, read_size);

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
2086 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2087 * @proc: binder_proc owning the buffer
2088 * @b: binder_buffer containing the object
2089 * @object: struct binder_object to read into
2090 * @index: index in offset array at which the binder_buffer_object is
2092 * @start_offset: points to the start of the offset array
2093 * @object_offsetp: offset of @object read from @b
2094 * @num_valid: the number of valid offsets in the offset array
2096 * Return: If @index is within the valid range of the offset array
2097 * described by @start and @num_valid, and if there's a valid
2098 * binder_buffer_object at the offset found in index @index
2099 * of the offset array, that object is returned. Otherwise,
2100 * %NULL is returned.
2101 * Note that the offset found in index @index itself is not
2102 * verified; this function assumes that @num_valid elements
2103 * from @start_offset were previously verified to have valid offsets.
2104 * If @object_offsetp is non-NULL, then the offset within
2105 * @b is written to it.
2107 static struct binder_buffer_object *binder_validate_ptr(
2108 struct binder_proc *proc,
2109 struct binder_buffer *b,
2110 struct binder_object *object,
2111 binder_size_t index,
2112 binder_size_t start_offset,
2113 binder_size_t *object_offsetp,
2114 binder_size_t num_valid)
2117 binder_size_t object_offset;
2118 unsigned long buffer_offset;
2120 if (index >= num_valid)
2123 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2124 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2125 b, buffer_offset, sizeof(object_offset));
2126 object_size = binder_get_object(proc, b, object_offset, object);
2127 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2130 *object_offsetp = object_offset;
2132 return &object->bbo;
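/*
 * Usage sketch (editor's note): @num_valid is the count of offset
 * array entries already processed, so an object can only name an
 * earlier entry as its parent. Hypothetical fragment, modeled on the
 * BINDER_TYPE_FDA handling in binder_transaction() below:
 *
 *	num_valid = (buffer_offset - off_start_offset) /
 *			sizeof(binder_size_t);
 *	parent = binder_validate_ptr(proc, b, &ptr_object, fda->parent,
 *				     off_start_offset, &parent_offset,
 *				     num_valid);
 *	if (!parent)
 *		return -EINVAL;
 *
 * A NULL return means the index was out of range, the offset was
 * invalid, or the object there was not BINDER_TYPE_PTR.
 */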
2136 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2137 * @proc: binder_proc owning the buffer
2138 * @b: transaction buffer
2139 * @objects_start_offset: offset to start of objects buffer
2140 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2141 * @fixup_offset: start offset in @buffer to fix up
2142 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2143 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2145 * Return: %true if a fixup in buffer @b at offset @fixup_offset is allowed.
2148 * For safety reasons, we only allow fixups inside a buffer to happen
2149 * at increasing offsets; additionally, we only allow fixup on the last
2150 * buffer object that was verified, or one of its parents.
2152 * Example of what is allowed:
2155 * B (parent = A, offset = 0)
2156 * C (parent = A, offset = 16)
2157 * D (parent = C, offset = 0)
2158 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2160 * Examples of what is not allowed:
2162 * Decreasing offsets within the same parent:
2164 * C (parent = A, offset = 16)
2165 * B (parent = A, offset = 0) // decreasing offset within A
2167 * Referring to a parent that wasn't the last object or any of its parents:
2169 * B (parent = A, offset = 0)
2170 * C (parent = A, offset = 0)
2171 * C (parent = A, offset = 16)
2172 * D (parent = B, offset = 0) // B is not A or any of A's parents
2174 static bool binder_validate_fixup(struct binder_proc *proc,
2175 struct binder_buffer *b,
2176 binder_size_t objects_start_offset,
2177 binder_size_t buffer_obj_offset,
2178 binder_size_t fixup_offset,
2179 binder_size_t last_obj_offset,
2180 binder_size_t last_min_offset)
2182 if (!last_obj_offset) {
2183 /* Nothing to fix up */
2187 while (last_obj_offset != buffer_obj_offset) {
2188 unsigned long buffer_offset;
2189 struct binder_object last_object;
2190 struct binder_buffer_object *last_bbo;
2191 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2193 if (object_size != sizeof(*last_bbo))
2196 last_bbo = &last_object.bbo;
2198 * Safe to retrieve the parent of last_obj, since it
2199 * was already previously verified by the driver.
2201 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2203 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2204 buffer_offset = objects_start_offset +
2205 sizeof(binder_size_t) * last_bbo->parent,
2206 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2208 sizeof(last_obj_offset));
2210 return (fixup_offset >= last_min_offset);
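/*
 * Worked example (editor's note), assuming 64-bit user pointers so
 * sizeof(uintptr_t) == 8, using the "allowed" layout above: when E's
 * fixup at offset 32 within A is validated, last_obj_offset refers
 * to D. D's parent is C with parent_offset 0, so last_min_offset
 * becomes 0 + 8 and the walk moves to C; C's parent is A with
 * parent_offset 16, so last_min_offset becomes 16 + 8 = 24 and the
 * walk reaches A. The fixup is allowed because 32 >= 24.
 */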
2214 * struct binder_task_work_cb - for deferred close
2216 * @twork: callback_head for task work
2219 * Structure to pass task work to be handled after
2220 * returning from binder_ioctl() via task_work_add().
2222 struct binder_task_work_cb {
2223 struct callback_head twork;
2228 * binder_do_fd_close() - close list of file descriptors
2229 * @twork: callback head for task work
2231 * It is not safe to call ksys_close() during the binder_ioctl()
2232 * function if there is a chance that binder's own file descriptor
2233 * might be closed. This is to meet the requirements for using
2234 * fdget() (see comments for __fget_light()). Therefore use
2235 * task_work_add() to schedule the close operation once we have
2236 * returned from binder_ioctl(). This function is a callback
2237 * for that mechanism and does the actual ksys_close() on the
2238 * given file descriptor.
2240 static void binder_do_fd_close(struct callback_head *twork)
2242 struct binder_task_work_cb *twcb = container_of(twork,
2243 struct binder_task_work_cb, twork);
2250 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2251 * @fd: file-descriptor to close
2253 * See comments in binder_do_fd_close(). This function is used to schedule
2254 * a file-descriptor to be closed after returning from binder_ioctl().
2256 static void binder_deferred_fd_close(int fd)
2258 struct binder_task_work_cb *twcb;
2260 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2263 init_task_work(&twcb->twork, binder_do_fd_close);
2264 __close_fd_get_file(fd, &twcb->file);
2266 task_work_add(current, &twcb->twork, true);
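/*
 * Usage sketch (editor's note): error paths that must drop an fd
 * already installed in the target use this instead of ksys_close(),
 * e.g. when unwinding partially applied fd fixups:
 *
 *	binder_alloc_copy_from_buffer(&proc->alloc, &fd, t->buffer,
 *				      fixup->offset, sizeof(fd));
 *	binder_deferred_fd_close(fd);
 *
 * The close then runs from task_work context after binder_ioctl()
 * returns, which satisfies the fdget() requirements noted above.
 */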
2271 static void binder_transaction_buffer_release(struct binder_proc *proc,
2272 struct binder_buffer *buffer,
2273 binder_size_t failed_at,
2276 int debug_id = buffer->debug_id;
2277 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2279 binder_debug(BINDER_DEBUG_TRANSACTION,
2280 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2281 proc->pid, buffer->debug_id,
2282 buffer->data_size, buffer->offsets_size,
2283 (unsigned long long)failed_at);
2285 if (buffer->target_node)
2286 binder_dec_node(buffer->target_node, 1, 0);
2288 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2289 off_end_offset = is_failure ? failed_at :
2290 off_start_offset + buffer->offsets_size;
2291 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2292 buffer_offset += sizeof(binder_size_t)) {
2293 struct binder_object_header *hdr;
2295 struct binder_object object;
2296 binder_size_t object_offset;
2298 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2299 buffer, buffer_offset,
2300 sizeof(object_offset));
2301 object_size = binder_get_object(proc, buffer,
2302 object_offset, &object);
2303 if (object_size == 0) {
2304 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2305 debug_id, (u64)object_offset, buffer->data_size);
2309 switch (hdr->type) {
2310 case BINDER_TYPE_BINDER:
2311 case BINDER_TYPE_WEAK_BINDER: {
2312 struct flat_binder_object *fp;
2313 struct binder_node *node;
2315 fp = to_flat_binder_object(hdr);
2316 node = binder_get_node(proc, fp->binder);
2318 pr_err("transaction release %d bad node %016llx\n",
2319 debug_id, (u64)fp->binder);
2322 binder_debug(BINDER_DEBUG_TRANSACTION,
2323 " node %d u%016llx\n",
2324 node->debug_id, (u64)node->ptr);
2325 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2327 binder_put_node(node);
2329 case BINDER_TYPE_HANDLE:
2330 case BINDER_TYPE_WEAK_HANDLE: {
2331 struct flat_binder_object *fp;
2332 struct binder_ref_data rdata;
2335 fp = to_flat_binder_object(hdr);
2336 ret = binder_dec_ref_for_handle(proc, fp->handle,
2337 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2340 pr_err("transaction release %d bad handle %d, ret = %d\n",
2341 debug_id, fp->handle, ret);
2344 binder_debug(BINDER_DEBUG_TRANSACTION,
2345 " ref %d desc %d\n",
2346 rdata.debug_id, rdata.desc);
2349 case BINDER_TYPE_FD: {
2351 * No need to close the file here since user-space
2352 * closes it for successfully delivered
2353 * transactions. For transactions that weren't
2354 * delivered, the new fd was never allocated so
2355 * there is no need to close and the fput on the
2356 * file is done when the transaction is torn down.
2359 WARN_ON(failed_at &&
2360 proc->tsk == current->group_leader);
2362 case BINDER_TYPE_PTR:
2364 * Nothing to do here, this will get cleaned up when the
2365 * transaction buffer gets freed
2368 case BINDER_TYPE_FDA: {
2369 struct binder_fd_array_object *fda;
2370 struct binder_buffer_object *parent;
2371 struct binder_object ptr_object;
2372 binder_size_t fda_offset;
2374 binder_size_t fd_buf_size;
2375 binder_size_t num_valid;
2377 if (proc->tsk != current->group_leader) {
2379 * Nothing to do if running in sender context
2380 * The fd fixups have not been applied so no
2381 * fds need to be closed.
2386 num_valid = (buffer_offset - off_start_offset) /
2387 sizeof(binder_size_t);
2388 fda = to_binder_fd_array_object(hdr);
2389 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2395 pr_err("transaction release %d bad parent offset\n",
2399 fd_buf_size = sizeof(u32) * fda->num_fds;
2400 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2401 pr_err("transaction release %d invalid number of fds (%lld)\n",
2402 debug_id, (u64)fda->num_fds);
2405 if (fd_buf_size > parent->length ||
2406 fda->parent_offset > parent->length - fd_buf_size) {
2407 /* No space for all file descriptors here. */
2408 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2409 debug_id, (u64)fda->num_fds);
2413 * the source data for binder_buffer_object is visible
2414 * to user-space and the @buffer element is the user
2415 * pointer to the buffer_object containing the fd_array.
2416 * Convert the address to an offset relative to
2417 * the base of the transaction buffer.
2420 (parent->buffer - (uintptr_t)buffer->user_data) +
2422 for (fd_index = 0; fd_index < fda->num_fds;
2425 binder_size_t offset = fda_offset +
2426 fd_index * sizeof(fd);
2428 binder_alloc_copy_from_buffer(&proc->alloc,
2433 binder_deferred_fd_close(fd);
2437 pr_err("transaction release %d bad object type %x\n",
2438 debug_id, hdr->type);
2444 static int binder_translate_binder(struct flat_binder_object *fp,
2445 struct binder_transaction *t,
2446 struct binder_thread *thread)
2448 struct binder_node *node;
2449 struct binder_proc *proc = thread->proc;
2450 struct binder_proc *target_proc = t->to_proc;
2451 struct binder_ref_data rdata;
2454 node = binder_get_node(proc, fp->binder);
2456 node = binder_new_node(proc, fp);
2460 if (fp->cookie != node->cookie) {
2461 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2462 proc->pid, thread->pid, (u64)fp->binder,
2463 node->debug_id, (u64)fp->cookie,
2468 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2473 ret = binder_inc_ref_for_node(target_proc, node,
2474 fp->hdr.type == BINDER_TYPE_BINDER,
2475 &thread->todo, &rdata);
2479 if (fp->hdr.type == BINDER_TYPE_BINDER)
2480 fp->hdr.type = BINDER_TYPE_HANDLE;
2482 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2484 fp->handle = rdata.desc;
2487 trace_binder_transaction_node_to_ref(t, node, &rdata);
2488 binder_debug(BINDER_DEBUG_TRANSACTION,
2489 " node %d u%016llx -> ref %d desc %d\n",
2490 node->debug_id, (u64)node->ptr,
2491 rdata.debug_id, rdata.desc);
2493 binder_put_node(node);
2497 static int binder_translate_handle(struct flat_binder_object *fp,
2498 struct binder_transaction *t,
2499 struct binder_thread *thread)
2501 struct binder_proc *proc = thread->proc;
2502 struct binder_proc *target_proc = t->to_proc;
2503 struct binder_node *node;
2504 struct binder_ref_data src_rdata;
2507 node = binder_get_node_from_ref(proc, fp->handle,
2508 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2510 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2511 proc->pid, thread->pid, fp->handle);
2514 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2519 binder_node_lock(node);
2520 if (node->proc == target_proc) {
2521 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2522 fp->hdr.type = BINDER_TYPE_BINDER;
2524 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2525 fp->binder = node->ptr;
2526 fp->cookie = node->cookie;
2528 binder_inner_proc_lock(node->proc);
2530 __acquire(&node->proc->inner_lock);
2531 binder_inc_node_nilocked(node,
2532 fp->hdr.type == BINDER_TYPE_BINDER,
2535 binder_inner_proc_unlock(node->proc);
2537 __release(&node->proc->inner_lock);
2538 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2539 binder_debug(BINDER_DEBUG_TRANSACTION,
2540 " ref %d desc %d -> node %d u%016llx\n",
2541 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2543 binder_node_unlock(node);
2545 struct binder_ref_data dest_rdata;
2547 binder_node_unlock(node);
2548 ret = binder_inc_ref_for_node(target_proc, node,
2549 fp->hdr.type == BINDER_TYPE_HANDLE,
2555 fp->handle = dest_rdata.desc;
2557 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2559 binder_debug(BINDER_DEBUG_TRANSACTION,
2560 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2561 src_rdata.debug_id, src_rdata.desc,
2562 dest_rdata.debug_id, dest_rdata.desc,
2566 binder_put_node(node);
2570 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2571 struct binder_transaction *t,
2572 struct binder_thread *thread,
2573 struct binder_transaction *in_reply_to)
2575 struct binder_proc *proc = thread->proc;
2576 struct binder_proc *target_proc = t->to_proc;
2577 struct binder_txn_fd_fixup *fixup;
2580 bool target_allows_fd;
2583 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2585 target_allows_fd = t->buffer->target_node->accept_fds;
2586 if (!target_allows_fd) {
2587 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2588 proc->pid, thread->pid,
2589 in_reply_to ? "reply" : "transaction",
2592 goto err_fd_not_accepted;
2597 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2598 proc->pid, thread->pid, fd);
2602 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2609 * Add fixup record for this transaction. The allocation
2610 * of the fd in the target needs to be done from a target thread.
2613 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2619 fixup->offset = fd_offset;
2620 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2621 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2629 err_fd_not_accepted:
2633 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2634 struct binder_buffer_object *parent,
2635 struct binder_transaction *t,
2636 struct binder_thread *thread,
2637 struct binder_transaction *in_reply_to)
2639 binder_size_t fdi, fd_buf_size;
2640 binder_size_t fda_offset;
2641 struct binder_proc *proc = thread->proc;
2642 struct binder_proc *target_proc = t->to_proc;
2644 fd_buf_size = sizeof(u32) * fda->num_fds;
2645 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2646 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2647 proc->pid, thread->pid, (u64)fda->num_fds);
2650 if (fd_buf_size > parent->length ||
2651 fda->parent_offset > parent->length - fd_buf_size) {
2652 /* No space for all file descriptors here. */
2653 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2654 proc->pid, thread->pid, (u64)fda->num_fds);
2658 * the source data for binder_buffer_object is visible
2659 * to user-space and the @buffer element is the user
2660 * pointer to the buffer_object containing the fd_array.
2661 * Convert the address to an offset relative to
2662 * the base of the transaction buffer.
2664 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2666 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2667 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2668 proc->pid, thread->pid);
2671 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2674 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2676 binder_alloc_copy_from_buffer(&target_proc->alloc,
2678 offset, sizeof(fd));
2679 ret = binder_translate_fd(fd, offset, t, thread,
2687 static int binder_fixup_parent(struct binder_transaction *t,
2688 struct binder_thread *thread,
2689 struct binder_buffer_object *bp,
2690 binder_size_t off_start_offset,
2691 binder_size_t num_valid,
2692 binder_size_t last_fixup_obj_off,
2693 binder_size_t last_fixup_min_off)
2695 struct binder_buffer_object *parent;
2696 struct binder_buffer *b = t->buffer;
2697 struct binder_proc *proc = thread->proc;
2698 struct binder_proc *target_proc = t->to_proc;
2699 struct binder_object object;
2700 binder_size_t buffer_offset;
2701 binder_size_t parent_offset;
2703 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2706 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2707 off_start_offset, &parent_offset,
2710 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2711 proc->pid, thread->pid);
2715 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2716 parent_offset, bp->parent_offset,
2718 last_fixup_min_off)) {
2719 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2720 proc->pid, thread->pid);
2724 if (parent->length < sizeof(binder_uintptr_t) ||
2725 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2726 /* No space for a pointer here! */
2727 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2728 proc->pid, thread->pid);
2731 buffer_offset = bp->parent_offset +
2732 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2733 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2734 &bp->buffer, sizeof(bp->buffer));
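/*
 * Editor's note: the copy above is the fixup itself. bp->buffer has
 * already been rewritten (see the BINDER_TYPE_PTR case in
 * binder_transaction() below) to the kernel-chosen address in the
 * target's scatter-gather area, and that value is patched into the
 * parent object at bp->parent_offset so the target reads a valid
 * pointer.
 */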
2740 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2741 * @t: transaction to send
2742 * @proc: process to send the transaction to
2743 * @thread: thread in @proc to send the transaction to (may be NULL)
2745 * This function queues a transaction to the specified process. It will try
2746 * to find a thread in the target process to handle the transaction and
2747 * wake it up. If no thread is found, the work is queued to the proc waitqueue.
2750 * If the @thread parameter is not NULL, the transaction is always queued
2751 * to the waitlist of that specific thread.
2753 * Return: true if the transaction was successfully queued
2754 * false if the target process or thread is dead
2756 static bool binder_proc_transaction(struct binder_transaction *t,
2757 struct binder_proc *proc,
2758 struct binder_thread *thread)
2760 struct binder_node *node = t->buffer->target_node;
2761 bool oneway = !!(t->flags & TF_ONE_WAY);
2762 bool pending_async = false;
2765 binder_node_lock(node);
2768 if (node->has_async_transaction) {
2769 pending_async = true;
2771 node->has_async_transaction = true;
2775 binder_inner_proc_lock(proc);
2777 if (proc->is_dead || (thread && thread->is_dead)) {
2778 binder_inner_proc_unlock(proc);
2779 binder_node_unlock(node);
2783 if (!thread && !pending_async)
2784 thread = binder_select_thread_ilocked(proc);
2787 binder_enqueue_thread_work_ilocked(thread, &t->work);
2788 else if (!pending_async)
2789 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2791 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2794 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2796 binder_inner_proc_unlock(proc);
2797 binder_node_unlock(node);
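/*
 * Usage sketch (editor's note): mirrors the call sites in
 * binder_transaction() below. Synchronous calls may pass a target
 * thread found on the transaction stack; one-way calls pass NULL so
 * that a waiting looper thread is selected:
 *
 *	if (!binder_proc_transaction(t, target_proc, NULL))
 *		goto err_dead_proc_or_thread;
 */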
2803 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2804 * @node: struct binder_node for which to get refs
2805 * @procp: returns @node->proc if valid
2806 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2808 * User-space normally keeps the node alive when creating a transaction
2809 * since it has a reference to the target. The local strong ref keeps it
2810 * alive if the sending process dies before the target process processes
2811 * the transaction. If the source process is malicious or has a reference
2812 * counting bug, relying on the local strong ref can fail.
2814 * Since user-space can cause the local strong ref to go away, we also take
2815 * a tmpref on the node to ensure it survives while we are constructing
2816 * the transaction. We also need a tmpref on the proc while we are
2817 * constructing the transaction, so we take that here as well.
2819 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2820 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2821 * target proc has died, @error is set to BR_DEAD_REPLY.
2823 static struct binder_node *binder_get_node_refs_for_txn(
2824 struct binder_node *node,
2825 struct binder_proc **procp,
2828 struct binder_node *target_node = NULL;
2830 binder_node_inner_lock(node);
2833 binder_inc_node_nilocked(node, 1, 0, NULL);
2834 binder_inc_node_tmpref_ilocked(node);
2835 node->proc->tmp_ref++;
2836 *procp = node->proc;
2838 *error = BR_DEAD_REPLY;
2839 binder_node_inner_unlock(node);
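/*
 * Usage sketch (editor's note): mirrors the handle lookup in
 * binder_transaction() below; the ref's node is pinned before the
 * proc lock is dropped:
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, tr->target.handle, true);
 *	if (ref)
 *		target_node = binder_get_node_refs_for_txn(
 *				ref->node, &target_proc, &return_error);
 *	binder_proc_unlock(proc);
 */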
2844 static void binder_transaction(struct binder_proc *proc,
2845 struct binder_thread *thread,
2846 struct binder_transaction_data *tr, int reply,
2847 binder_size_t extra_buffers_size)
2850 struct binder_transaction *t;
2851 struct binder_work *w;
2852 struct binder_work *tcomplete;
2853 binder_size_t buffer_offset = 0;
2854 binder_size_t off_start_offset, off_end_offset;
2855 binder_size_t off_min;
2856 binder_size_t sg_buf_offset, sg_buf_end_offset;
2857 struct binder_proc *target_proc = NULL;
2858 struct binder_thread *target_thread = NULL;
2859 struct binder_node *target_node = NULL;
2860 struct binder_transaction *in_reply_to = NULL;
2861 struct binder_transaction_log_entry *e;
2862 uint32_t return_error = 0;
2863 uint32_t return_error_param = 0;
2864 uint32_t return_error_line = 0;
2865 binder_size_t last_fixup_obj_off = 0;
2866 binder_size_t last_fixup_min_off = 0;
2867 struct binder_context *context = proc->context;
2868 int t_debug_id = atomic_inc_return(&binder_last_id);
2869 char *secctx = NULL;
2872 e = binder_transaction_log_add(&binder_transaction_log);
2873 e->debug_id = t_debug_id;
2874 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2875 e->from_proc = proc->pid;
2876 e->from_thread = thread->pid;
2877 e->target_handle = tr->target.handle;
2878 e->data_size = tr->data_size;
2879 e->offsets_size = tr->offsets_size;
2880 e->context_name = proc->context->name;
2883 binder_inner_proc_lock(proc);
2884 in_reply_to = thread->transaction_stack;
2885 if (in_reply_to == NULL) {
2886 binder_inner_proc_unlock(proc);
2887 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2888 proc->pid, thread->pid);
2889 return_error = BR_FAILED_REPLY;
2890 return_error_param = -EPROTO;
2891 return_error_line = __LINE__;
2892 goto err_empty_call_stack;
2894 if (in_reply_to->to_thread != thread) {
2895 spin_lock(&in_reply_to->lock);
2896 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2897 proc->pid, thread->pid, in_reply_to->debug_id,
2898 in_reply_to->to_proc ?
2899 in_reply_to->to_proc->pid : 0,
2900 in_reply_to->to_thread ?
2901 in_reply_to->to_thread->pid : 0);
2902 spin_unlock(&in_reply_to->lock);
2903 binder_inner_proc_unlock(proc);
2904 return_error = BR_FAILED_REPLY;
2905 return_error_param = -EPROTO;
2906 return_error_line = __LINE__;
2908 goto err_bad_call_stack;
2910 thread->transaction_stack = in_reply_to->to_parent;
2911 binder_inner_proc_unlock(proc);
2912 binder_set_nice(in_reply_to->saved_priority);
2913 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2914 if (target_thread == NULL) {
2915 /* annotation for sparse */
2916 __release(&target_thread->proc->inner_lock);
2917 return_error = BR_DEAD_REPLY;
2918 return_error_line = __LINE__;
2919 goto err_dead_binder;
2921 if (target_thread->transaction_stack != in_reply_to) {
2922 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2923 proc->pid, thread->pid,
2924 target_thread->transaction_stack ?
2925 target_thread->transaction_stack->debug_id : 0,
2926 in_reply_to->debug_id);
2927 binder_inner_proc_unlock(target_thread->proc);
2928 return_error = BR_FAILED_REPLY;
2929 return_error_param = -EPROTO;
2930 return_error_line = __LINE__;
2932 target_thread = NULL;
2933 goto err_dead_binder;
2935 target_proc = target_thread->proc;
2936 target_proc->tmp_ref++;
2937 binder_inner_proc_unlock(target_thread->proc);
2939 if (tr->target.handle) {
2940 struct binder_ref *ref;
2943 * There must already be a strong ref
2944 * on this node. If so, do a strong
2945 * increment on the node to ensure it
2946 * stays alive until the transaction is
2949 binder_proc_lock(proc);
2950 ref = binder_get_ref_olocked(proc, tr->target.handle,
2953 target_node = binder_get_node_refs_for_txn(
2954 ref->node, &target_proc,
2957 binder_user_error("%d:%d got transaction to invalid handle\n",
2958 proc->pid, thread->pid);
2959 return_error = BR_FAILED_REPLY;
2961 binder_proc_unlock(proc);
2963 mutex_lock(&context->context_mgr_node_lock);
2964 target_node = context->binder_context_mgr_node;
2966 target_node = binder_get_node_refs_for_txn(
2967 target_node, &target_proc,
2970 return_error = BR_DEAD_REPLY;
2971 mutex_unlock(&context->context_mgr_node_lock);
2972 if (target_node && target_proc == proc) {
2973 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2974 proc->pid, thread->pid);
2975 return_error = BR_FAILED_REPLY;
2976 return_error_param = -EINVAL;
2977 return_error_line = __LINE__;
2978 goto err_invalid_target_handle;
2983 * return_error is set above
2985 return_error_param = -EINVAL;
2986 return_error_line = __LINE__;
2987 goto err_dead_binder;
2989 e->to_node = target_node->debug_id;
2990 if (security_binder_transaction(proc->tsk,
2991 target_proc->tsk) < 0) {
2992 return_error = BR_FAILED_REPLY;
2993 return_error_param = -EPERM;
2994 return_error_line = __LINE__;
2995 goto err_invalid_target_handle;
2997 binder_inner_proc_lock(proc);
2999 w = list_first_entry_or_null(&thread->todo,
3000 struct binder_work, entry);
3001 if (!(tr->flags & TF_ONE_WAY) && w &&
3002 w->type == BINDER_WORK_TRANSACTION) {
3004 * Do not allow new outgoing transaction from a
3005 * thread that has a transaction at the head of
3006 * its todo list. Only need to check the head
3007 * because binder_select_thread_ilocked picks a
3008 * thread from proc->waiting_threads to enqueue
3009 * the transaction, and nothing is queued to the
3010 * todo list while the thread is on waiting_threads.
3012 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3013 proc->pid, thread->pid);
3014 binder_inner_proc_unlock(proc);
3015 return_error = BR_FAILED_REPLY;
3016 return_error_param = -EPROTO;
3017 return_error_line = __LINE__;
3018 goto err_bad_todo_list;
3021 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3022 struct binder_transaction *tmp;
3024 tmp = thread->transaction_stack;
3025 if (tmp->to_thread != thread) {
3026 spin_lock(&tmp->lock);
3027 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3028 proc->pid, thread->pid, tmp->debug_id,
3029 tmp->to_proc ? tmp->to_proc->pid : 0,
3031 tmp->to_thread->pid : 0);
3032 spin_unlock(&tmp->lock);
3033 binder_inner_proc_unlock(proc);
3034 return_error = BR_FAILED_REPLY;
3035 return_error_param = -EPROTO;
3036 return_error_line = __LINE__;
3037 goto err_bad_call_stack;
3040 struct binder_thread *from;
3042 spin_lock(&tmp->lock);
3044 if (from && from->proc == target_proc) {
3045 atomic_inc(&from->tmp_ref);
3046 target_thread = from;
3047 spin_unlock(&tmp->lock);
3050 spin_unlock(&tmp->lock);
3051 tmp = tmp->from_parent;
3054 binder_inner_proc_unlock(proc);
3057 e->to_thread = target_thread->pid;
3058 e->to_proc = target_proc->pid;
3060 /* TODO: reuse incoming transaction for reply */
3061 t = kzalloc(sizeof(*t), GFP_KERNEL);
3063 return_error = BR_FAILED_REPLY;
3064 return_error_param = -ENOMEM;
3065 return_error_line = __LINE__;
3066 goto err_alloc_t_failed;
3068 INIT_LIST_HEAD(&t->fd_fixups);
3069 binder_stats_created(BINDER_STAT_TRANSACTION);
3070 spin_lock_init(&t->lock);
3072 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3073 if (tcomplete == NULL) {
3074 return_error = BR_FAILED_REPLY;
3075 return_error_param = -ENOMEM;
3076 return_error_line = __LINE__;
3077 goto err_alloc_tcomplete_failed;
3079 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3081 t->debug_id = t_debug_id;
3084 binder_debug(BINDER_DEBUG_TRANSACTION,
3085 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3086 proc->pid, thread->pid, t->debug_id,
3087 target_proc->pid, target_thread->pid,
3088 (u64)tr->data.ptr.buffer,
3089 (u64)tr->data.ptr.offsets,
3090 (u64)tr->data_size, (u64)tr->offsets_size,
3091 (u64)extra_buffers_size);
3093 binder_debug(BINDER_DEBUG_TRANSACTION,
3094 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3095 proc->pid, thread->pid, t->debug_id,
3096 target_proc->pid, target_node->debug_id,
3097 (u64)tr->data.ptr.buffer,
3098 (u64)tr->data.ptr.offsets,
3099 (u64)tr->data_size, (u64)tr->offsets_size,
3100 (u64)extra_buffers_size);
3102 if (!reply && !(tr->flags & TF_ONE_WAY))
3106 t->sender_euid = task_euid(proc->tsk);
3107 t->to_proc = target_proc;
3108 t->to_thread = target_thread;
3110 t->flags = tr->flags;
3111 t->priority = task_nice(current);
3113 if (target_node && target_node->txn_security_ctx) {
3117 security_task_getsecid(proc->tsk, &secid);
3118 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3120 return_error = BR_FAILED_REPLY;
3121 return_error_param = ret;
3122 return_error_line = __LINE__;
3123 goto err_get_secctx_failed;
3125 added_size = ALIGN(secctx_sz, sizeof(u64));
3126 extra_buffers_size += added_size;
3127 if (extra_buffers_size < added_size) {
3128 /* integer overflow of extra_buffers_size */
3129 return_error = BR_FAILED_REPLY;
3130 return_error_param = -EINVAL;
3131 return_error_line = __LINE__;
3132 goto err_bad_extra_size;
3136 trace_binder_transaction(reply, t, target_node);
3138 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3139 tr->offsets_size, extra_buffers_size,
3140 !reply && (t->flags & TF_ONE_WAY));
3141 if (IS_ERR(t->buffer)) {
3143 * -ESRCH indicates VMA cleared. The target is dying.
3145 return_error_param = PTR_ERR(t->buffer);
3146 return_error = return_error_param == -ESRCH ?
3147 BR_DEAD_REPLY : BR_FAILED_REPLY;
3148 return_error_line = __LINE__;
3150 goto err_binder_alloc_buf_failed;
3153 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3154 ALIGN(tr->offsets_size, sizeof(void *)) +
3155 ALIGN(extra_buffers_size, sizeof(void *)) -
3156 ALIGN(secctx_sz, sizeof(u64));
3158 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3159 binder_alloc_copy_to_buffer(&target_proc->alloc,
3160 t->buffer, buf_offset,
3162 security_release_secctx(secctx, secctx_sz);
3165 t->buffer->debug_id = t->debug_id;
3166 t->buffer->transaction = t;
3167 t->buffer->target_node = target_node;
3168 trace_binder_transaction_alloc_buf(t->buffer);
3170 if (binder_alloc_copy_user_to_buffer(
3171 &target_proc->alloc,
3173 (const void __user *)
3174 (uintptr_t)tr->data.ptr.buffer,
3176 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3177 proc->pid, thread->pid);
3178 return_error = BR_FAILED_REPLY;
3179 return_error_param = -EFAULT;
3180 return_error_line = __LINE__;
3181 goto err_copy_data_failed;
3183 if (binder_alloc_copy_user_to_buffer(
3184 &target_proc->alloc,
3186 ALIGN(tr->data_size, sizeof(void *)),
3187 (const void __user *)
3188 (uintptr_t)tr->data.ptr.offsets,
3189 tr->offsets_size)) {
3190 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3191 proc->pid, thread->pid);
3192 return_error = BR_FAILED_REPLY;
3193 return_error_param = -EFAULT;
3194 return_error_line = __LINE__;
3195 goto err_copy_data_failed;
3197 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3198 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3199 proc->pid, thread->pid, (u64)tr->offsets_size);
3200 return_error = BR_FAILED_REPLY;
3201 return_error_param = -EINVAL;
3202 return_error_line = __LINE__;
3203 goto err_bad_offset;
3205 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3206 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3207 proc->pid, thread->pid,
3208 (u64)extra_buffers_size);
3209 return_error = BR_FAILED_REPLY;
3210 return_error_param = -EINVAL;
3211 return_error_line = __LINE__;
3212 goto err_bad_offset;
3214 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3215 buffer_offset = off_start_offset;
3216 off_end_offset = off_start_offset + tr->offsets_size;
3217 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3218 sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3220 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3221 buffer_offset += sizeof(binder_size_t)) {
3222 struct binder_object_header *hdr;
3224 struct binder_object object;
3225 binder_size_t object_offset;
3227 binder_alloc_copy_from_buffer(&target_proc->alloc,
3231 sizeof(object_offset));
3232 object_size = binder_get_object(target_proc, t->buffer,
3233 object_offset, &object);
3234 if (object_size == 0 || object_offset < off_min) {
3235 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3236 proc->pid, thread->pid,
3239 (u64)t->buffer->data_size);
3240 return_error = BR_FAILED_REPLY;
3241 return_error_param = -EINVAL;
3242 return_error_line = __LINE__;
3243 goto err_bad_offset;
3247 off_min = object_offset + object_size;
3248 switch (hdr->type) {
3249 case BINDER_TYPE_BINDER:
3250 case BINDER_TYPE_WEAK_BINDER: {
3251 struct flat_binder_object *fp;
3253 fp = to_flat_binder_object(hdr);
3254 ret = binder_translate_binder(fp, t, thread);
3256 return_error = BR_FAILED_REPLY;
3257 return_error_param = ret;
3258 return_error_line = __LINE__;
3259 goto err_translate_failed;
3261 binder_alloc_copy_to_buffer(&target_proc->alloc,
3262 t->buffer, object_offset,
3265 case BINDER_TYPE_HANDLE:
3266 case BINDER_TYPE_WEAK_HANDLE: {
3267 struct flat_binder_object *fp;
3269 fp = to_flat_binder_object(hdr);
3270 ret = binder_translate_handle(fp, t, thread);
3272 return_error = BR_FAILED_REPLY;
3273 return_error_param = ret;
3274 return_error_line = __LINE__;
3275 goto err_translate_failed;
3277 binder_alloc_copy_to_buffer(&target_proc->alloc,
3278 t->buffer, object_offset,
3282 case BINDER_TYPE_FD: {
3283 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3284 binder_size_t fd_offset = object_offset +
3285 (uintptr_t)&fp->fd - (uintptr_t)fp;
3286 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3287 thread, in_reply_to);
3290 return_error = BR_FAILED_REPLY;
3291 return_error_param = ret;
3292 return_error_line = __LINE__;
3293 goto err_translate_failed;
3296 binder_alloc_copy_to_buffer(&target_proc->alloc,
3297 t->buffer, object_offset,
3300 case BINDER_TYPE_FDA: {
3301 struct binder_object ptr_object;
3302 binder_size_t parent_offset;
3303 struct binder_fd_array_object *fda =
3304 to_binder_fd_array_object(hdr);
3305 size_t num_valid = (buffer_offset - off_start_offset) /
3306 sizeof(binder_size_t);
3307 struct binder_buffer_object *parent =
3308 binder_validate_ptr(target_proc, t->buffer,
3309 &ptr_object, fda->parent,
3314 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3315 proc->pid, thread->pid);
3316 return_error = BR_FAILED_REPLY;
3317 return_error_param = -EINVAL;
3318 return_error_line = __LINE__;
3319 goto err_bad_parent;
3321 if (!binder_validate_fixup(target_proc, t->buffer,
3326 last_fixup_min_off)) {
3327 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3328 proc->pid, thread->pid);
3329 return_error = BR_FAILED_REPLY;
3330 return_error_param = -EINVAL;
3331 return_error_line = __LINE__;
3332 goto err_bad_parent;
3334 ret = binder_translate_fd_array(fda, parent, t, thread,
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = ret;
3339 return_error_line = __LINE__;
3340 goto err_translate_failed;
3342 last_fixup_obj_off = parent_offset;
3343 last_fixup_min_off =
3344 fda->parent_offset + sizeof(u32) * fda->num_fds;
3346 case BINDER_TYPE_PTR: {
3347 struct binder_buffer_object *bp =
3348 to_binder_buffer_object(hdr);
3349 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3352 if (bp->length > buf_left) {
3353 binder_user_error("%d:%d got transaction with too large buffer\n",
3354 proc->pid, thread->pid);
3355 return_error = BR_FAILED_REPLY;
3356 return_error_param = -EINVAL;
3357 return_error_line = __LINE__;
3358 goto err_bad_offset;
3360 if (binder_alloc_copy_user_to_buffer(
3361 &target_proc->alloc,
3364 (const void __user *)
3365 (uintptr_t)bp->buffer,
3367 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3368 proc->pid, thread->pid);
3369 return_error_param = -EFAULT;
3370 return_error = BR_FAILED_REPLY;
3371 return_error_line = __LINE__;
3372 goto err_copy_data_failed;
3374 /* Fixup buffer pointer to target proc address space */
3375 bp->buffer = (uintptr_t)
3376 t->buffer->user_data + sg_buf_offset;
3377 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3379 num_valid = (buffer_offset - off_start_offset) /
3380 sizeof(binder_size_t);
3381 ret = binder_fixup_parent(t, thread, bp,
3385 last_fixup_min_off);
3387 return_error = BR_FAILED_REPLY;
3388 return_error_param = ret;
3389 return_error_line = __LINE__;
3390 goto err_translate_failed;
3392 binder_alloc_copy_to_buffer(&target_proc->alloc,
3393 t->buffer, object_offset,
3395 last_fixup_obj_off = object_offset;
3396 last_fixup_min_off = 0;
3399 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3400 proc->pid, thread->pid, hdr->type);
3401 return_error = BR_FAILED_REPLY;
3402 return_error_param = -EINVAL;
3403 return_error_line = __LINE__;
3404 goto err_bad_object_type;
3407 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3408 t->work.type = BINDER_WORK_TRANSACTION;
3411 binder_enqueue_thread_work(thread, tcomplete);
3412 binder_inner_proc_lock(target_proc);
3413 if (target_thread->is_dead) {
3414 binder_inner_proc_unlock(target_proc);
3415 goto err_dead_proc_or_thread;
3417 BUG_ON(t->buffer->async_transaction != 0);
3418 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3419 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3420 binder_inner_proc_unlock(target_proc);
3421 wake_up_interruptible_sync(&target_thread->wait);
3422 binder_free_transaction(in_reply_to);
3423 } else if (!(t->flags & TF_ONE_WAY)) {
3424 BUG_ON(t->buffer->async_transaction != 0);
3425 binder_inner_proc_lock(proc);
3427 * Defer the TRANSACTION_COMPLETE, so we don't return to
3428 * userspace immediately; this allows the target process to
3429 * immediately start processing this transaction, reducing
3430 * latency. We will then return the TRANSACTION_COMPLETE when
3431 * the target replies (or there is an error).
3433 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3435 t->from_parent = thread->transaction_stack;
3436 thread->transaction_stack = t;
3437 binder_inner_proc_unlock(proc);
3438 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3439 binder_inner_proc_lock(proc);
3440 binder_pop_transaction_ilocked(thread, t);
3441 binder_inner_proc_unlock(proc);
3442 goto err_dead_proc_or_thread;
3445 BUG_ON(target_node == NULL);
3446 BUG_ON(t->buffer->async_transaction != 1);
3447 binder_enqueue_thread_work(thread, tcomplete);
3448 if (!binder_proc_transaction(t, target_proc, NULL))
3449 goto err_dead_proc_or_thread;
3452 binder_thread_dec_tmpref(target_thread);
3453 binder_proc_dec_tmpref(target_proc);
3455 binder_dec_node_tmpref(target_node);
3457 * write barrier to synchronize with initialization
3461 WRITE_ONCE(e->debug_id_done, t_debug_id);
3464 err_dead_proc_or_thread:
3465 return_error = BR_DEAD_REPLY;
3466 return_error_line = __LINE__;
3467 binder_dequeue_work(proc, tcomplete);
3468 err_translate_failed:
3469 err_bad_object_type:
3472 err_copy_data_failed:
3473 binder_free_txn_fixups(t);
3474 trace_binder_transaction_failed_buffer_release(t->buffer);
3475 binder_transaction_buffer_release(target_proc, t->buffer,
3476 buffer_offset, true);
3478 binder_dec_node_tmpref(target_node);
3480 t->buffer->transaction = NULL;
3481 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3482 err_binder_alloc_buf_failed:
3485 security_release_secctx(secctx, secctx_sz);
3486 err_get_secctx_failed:
3488 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3489 err_alloc_tcomplete_failed:
3491 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3495 err_empty_call_stack:
3497 err_invalid_target_handle:
3499 binder_thread_dec_tmpref(target_thread);
3501 binder_proc_dec_tmpref(target_proc);
3503 binder_dec_node(target_node, 1, 0);
3504 binder_dec_node_tmpref(target_node);
3507 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3508 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3509 proc->pid, thread->pid, return_error, return_error_param,
3510 (u64)tr->data_size, (u64)tr->offsets_size,
3514 struct binder_transaction_log_entry *fe;
3516 e->return_error = return_error;
3517 e->return_error_param = return_error_param;
3518 e->return_error_line = return_error_line;
3519 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3522 * write barrier to synchronize with initialization
3526 WRITE_ONCE(e->debug_id_done, t_debug_id);
3527 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3530 BUG_ON(thread->return_error.cmd != BR_OK);
3532 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3533 binder_enqueue_thread_work(thread, &thread->return_error.work);
3534 binder_send_failed_reply(in_reply_to, return_error);
3536 thread->return_error.cmd = return_error;
3537 binder_enqueue_thread_work(thread, &thread->return_error.work);
3542 * binder_free_buf() - free the specified buffer
3543 * @proc: binder proc that owns buffer
3544 * @buffer: buffer to be freed
3546 * If the buffer is for an async transaction, enqueue the next async
3547 * transaction from the node.
3549 * Clean up the buffer and free it.
3552 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3554 if (buffer->transaction) {
3555 buffer->transaction->buffer = NULL;
3556 buffer->transaction = NULL;
3558 if (buffer->async_transaction && buffer->target_node) {
3559 struct binder_node *buf_node;
3560 struct binder_work *w;
3562 buf_node = buffer->target_node;
3563 binder_node_inner_lock(buf_node);
3564 BUG_ON(!buf_node->has_async_transaction);
3565 BUG_ON(buf_node->proc != proc);
3566 w = binder_dequeue_work_head_ilocked(
3567 &buf_node->async_todo);
3569 buf_node->has_async_transaction = false;
3571 binder_enqueue_work_ilocked(
3573 binder_wakeup_proc_ilocked(proc);
3575 binder_node_inner_unlock(buf_node);
3577 trace_binder_transaction_buffer_release(buffer);
3578 binder_transaction_buffer_release(proc, buffer, 0, false);
3579 binder_alloc_free_buf(&proc->alloc, buffer);
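/*
 * Usage sketch (editor's note): the BC_FREE_BUFFER handler below
 * first marks the buffer as pending-free under the allocator lock,
 * then releases it here outside that lock:
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (!IS_ERR_OR_NULL(buffer))
 *		binder_free_buf(proc, buffer);
 */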
3582 static int binder_thread_write(struct binder_proc *proc,
3583 struct binder_thread *thread,
3584 binder_uintptr_t binder_buffer, size_t size,
3585 binder_size_t *consumed)
3588 struct binder_context *context = proc->context;
3589 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3590 void __user *ptr = buffer + *consumed;
3591 void __user *end = buffer + size;
3593 while (ptr < end && thread->return_error.cmd == BR_OK) {
3596 if (get_user(cmd, (uint32_t __user *)ptr))
3598 ptr += sizeof(uint32_t);
3599 trace_binder_command(cmd);
3600 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3601 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3602 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3603 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3611 const char *debug_string;
3612 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3613 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3614 struct binder_ref_data rdata;
3616 if (get_user(target, (uint32_t __user *)ptr))
3619 ptr += sizeof(uint32_t);
3621 if (increment && !target) {
3622 struct binder_node *ctx_mgr_node;
3623 mutex_lock(&context->context_mgr_node_lock);
3624 ctx_mgr_node = context->binder_context_mgr_node;
3626 ret = binder_inc_ref_for_node(
3628 strong, NULL, &rdata);
3629 mutex_unlock(&context->context_mgr_node_lock);
3632 ret = binder_update_ref_for_handle(
3633 proc, target, increment, strong,
3635 if (!ret && rdata.desc != target) {
3636 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3637 proc->pid, thread->pid,
3638 target, rdata.desc);
3642 debug_string = "IncRefs";
3645 debug_string = "Acquire";
3648 debug_string = "Release";
3652 debug_string = "DecRefs";
3656 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3657 proc->pid, thread->pid, debug_string,
3658 strong, target, ret);
3661 binder_debug(BINDER_DEBUG_USER_REFS,
3662 "%d:%d %s ref %d desc %d s %d w %d\n",
3663 proc->pid, thread->pid, debug_string,
3664 rdata.debug_id, rdata.desc, rdata.strong,
3668 case BC_INCREFS_DONE:
3669 case BC_ACQUIRE_DONE: {
3670 binder_uintptr_t node_ptr;
3671 binder_uintptr_t cookie;
3672 struct binder_node *node;
3675 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3677 ptr += sizeof(binder_uintptr_t);
3678 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3680 ptr += sizeof(binder_uintptr_t);
3681 node = binder_get_node(proc, node_ptr);
3683 binder_user_error("%d:%d %s u%016llx no match\n",
3684 proc->pid, thread->pid,
3685 cmd == BC_INCREFS_DONE ?
3691 if (cookie != node->cookie) {
3692 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3693 proc->pid, thread->pid,
3694 cmd == BC_INCREFS_DONE ?
3695 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3696 (u64)node_ptr, node->debug_id,
3697 (u64)cookie, (u64)node->cookie);
3698 binder_put_node(node);
3701 binder_node_inner_lock(node);
3702 if (cmd == BC_ACQUIRE_DONE) {
3703 if (node->pending_strong_ref == 0) {
3704 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3705 proc->pid, thread->pid,
3707 binder_node_inner_unlock(node);
3708 binder_put_node(node);
3711 node->pending_strong_ref = 0;
3713 if (node->pending_weak_ref == 0) {
3714 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3715 proc->pid, thread->pid,
3717 binder_node_inner_unlock(node);
3718 binder_put_node(node);
3721 node->pending_weak_ref = 0;
3723 free_node = binder_dec_node_nilocked(node,
3724 cmd == BC_ACQUIRE_DONE, 0);
3726 binder_debug(BINDER_DEBUG_USER_REFS,
3727 "%d:%d %s node %d ls %d lw %d tr %d\n",
3728 proc->pid, thread->pid,
3729 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3730 node->debug_id, node->local_strong_refs,
3731 node->local_weak_refs, node->tmp_refs);
3732 binder_node_inner_unlock(node);
3733 binder_put_node(node);
3736 case BC_ATTEMPT_ACQUIRE:
3737 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3739 case BC_ACQUIRE_RESULT:
3740 pr_err("BC_ACQUIRE_RESULT not supported\n");
3743 case BC_FREE_BUFFER: {
3744 binder_uintptr_t data_ptr;
3745 struct binder_buffer *buffer;
3747 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3749 ptr += sizeof(binder_uintptr_t);
3751 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3753 if (IS_ERR_OR_NULL(buffer)) {
3754 if (PTR_ERR(buffer) == -EPERM) {
3756 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3757 proc->pid, thread->pid,
3761 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3762 proc->pid, thread->pid,
3767 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3768 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3769 proc->pid, thread->pid, (u64)data_ptr,
3771 buffer->transaction ? "active" : "finished");
3772 binder_free_buf(proc, buffer);
3776 case BC_TRANSACTION_SG:
3778 struct binder_transaction_data_sg tr;
3780 if (copy_from_user(&tr, ptr, sizeof(tr)))
3783 binder_transaction(proc, thread, &tr.transaction_data,
3784 cmd == BC_REPLY_SG, tr.buffers_size);
3787 case BC_TRANSACTION:
3789 struct binder_transaction_data tr;
3791 if (copy_from_user(&tr, ptr, sizeof(tr)))
3794 binder_transaction(proc, thread, &tr,
3795 cmd == BC_REPLY, 0);
3799 case BC_REGISTER_LOOPER:
3800 binder_debug(BINDER_DEBUG_THREADS,
3801 "%d:%d BC_REGISTER_LOOPER\n",
3802 proc->pid, thread->pid);
3803 binder_inner_proc_lock(proc);
3804 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3805 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3806 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3807 proc->pid, thread->pid);
3808 } else if (proc->requested_threads == 0) {
3809 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3810 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3811 proc->pid, thread->pid);
3813 proc->requested_threads--;
3814 proc->requested_threads_started++;
3816 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3817 binder_inner_proc_unlock(proc);
3819 case BC_ENTER_LOOPER:
3820 binder_debug(BINDER_DEBUG_THREADS,
3821 "%d:%d BC_ENTER_LOOPER\n",
3822 proc->pid, thread->pid);
3823 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3824 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3825 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3826 proc->pid, thread->pid);
3828 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3830 case BC_EXIT_LOOPER:
3831 binder_debug(BINDER_DEBUG_THREADS,
3832 "%d:%d BC_EXIT_LOOPER\n",
3833 proc->pid, thread->pid);
3834 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3837 case BC_REQUEST_DEATH_NOTIFICATION:
3838 case BC_CLEAR_DEATH_NOTIFICATION: {
3840 binder_uintptr_t cookie;
3841 struct binder_ref *ref;
3842 struct binder_ref_death *death = NULL;
3844 if (get_user(target, (uint32_t __user *)ptr))
3846 ptr += sizeof(uint32_t);
3847 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3849 ptr += sizeof(binder_uintptr_t);
3850 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3852 * Allocate memory for death notification
3853 * before taking lock
3855 death = kzalloc(sizeof(*death), GFP_KERNEL);
3856 if (death == NULL) {
3857 WARN_ON(thread->return_error.cmd !=
3859 thread->return_error.cmd = BR_ERROR;
3860 binder_enqueue_thread_work(
3862 &thread->return_error.work);
3864 BINDER_DEBUG_FAILED_TRANSACTION,
3865 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3866 proc->pid, thread->pid);
3870 binder_proc_lock(proc);
3871 ref = binder_get_ref_olocked(proc, target, false);
3873 binder_user_error("%d:%d %s invalid ref %d\n",
3874 proc->pid, thread->pid,
3875 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3876 "BC_REQUEST_DEATH_NOTIFICATION" :
3877 "BC_CLEAR_DEATH_NOTIFICATION",
3879 binder_proc_unlock(proc);
3884 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3885 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3886 proc->pid, thread->pid,
3887 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3888 "BC_REQUEST_DEATH_NOTIFICATION" :
3889 "BC_CLEAR_DEATH_NOTIFICATION",
3890 (u64)cookie, ref->data.debug_id,
3891 ref->data.desc, ref->data.strong,
3892 ref->data.weak, ref->node->debug_id);
3894 binder_node_lock(ref->node);
3895 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3897 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3898 proc->pid, thread->pid);
3899 binder_node_unlock(ref->node);
3900 binder_proc_unlock(proc);
3904 binder_stats_created(BINDER_STAT_DEATH);
3905 INIT_LIST_HEAD(&death->work.entry);
3906 death->cookie = cookie;
3908 if (ref->node->proc == NULL) {
3909 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3911 binder_inner_proc_lock(proc);
3912 binder_enqueue_work_ilocked(
3913 &ref->death->work, &proc->todo);
3914 binder_wakeup_proc_ilocked(proc);
3915 binder_inner_proc_unlock(proc);
3918 if (ref->death == NULL) {
3919 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3920 proc->pid, thread->pid);
3921 binder_node_unlock(ref->node);
3922 binder_proc_unlock(proc);
3926 if (death->cookie != cookie) {
3927 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3928 proc->pid, thread->pid,
3931 binder_node_unlock(ref->node);
3932 binder_proc_unlock(proc);
3936 binder_inner_proc_lock(proc);
3937 if (list_empty(&death->work.entry)) {
3938 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3939 if (thread->looper &
3940 (BINDER_LOOPER_STATE_REGISTERED |
3941 BINDER_LOOPER_STATE_ENTERED))
3942 binder_enqueue_thread_work_ilocked(
3946 binder_enqueue_work_ilocked(
3949 binder_wakeup_proc_ilocked(
3953 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3954 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3956 binder_inner_proc_unlock(proc);
3958 binder_node_unlock(ref->node);
3959 binder_proc_unlock(proc);
3961 case BC_DEAD_BINDER_DONE: {
3962 struct binder_work *w;
3963 binder_uintptr_t cookie;
3964 struct binder_ref_death *death = NULL;
3966 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3969 ptr += sizeof(cookie);
3970 binder_inner_proc_lock(proc);
3971 list_for_each_entry(w, &proc->delivered_death,
3973 struct binder_ref_death *tmp_death =
3975 struct binder_ref_death,
3978 if (tmp_death->cookie == cookie) {
3983 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3984 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3985 proc->pid, thread->pid, (u64)cookie,
3987 if (death == NULL) {
3988 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3989 proc->pid, thread->pid, (u64)cookie);
3990 binder_inner_proc_unlock(proc);
3993 binder_dequeue_work_ilocked(&death->work);
3994 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3995 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3996 if (thread->looper &
3997 (BINDER_LOOPER_STATE_REGISTERED |
3998 BINDER_LOOPER_STATE_ENTERED))
3999 binder_enqueue_thread_work_ilocked(
4000 thread, &death->work);
4002 binder_enqueue_work_ilocked(
4005 binder_wakeup_proc_ilocked(proc);
4008 binder_inner_proc_unlock(proc);
4012 pr_err("%d:%d unknown command %d\n",
4013 proc->pid, thread->pid, cmd);
4016 *consumed = ptr - buffer;
4021 static void binder_stat_br(struct binder_proc *proc,
4022 struct binder_thread *thread, uint32_t cmd)
4024 trace_binder_return(cmd);
4025 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4026 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4027 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4028 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4032 static int binder_put_node_cmd(struct binder_proc *proc,
4033 struct binder_thread *thread,
4035 binder_uintptr_t node_ptr,
4036 binder_uintptr_t node_cookie,
4038 uint32_t cmd, const char *cmd_name)
4040 void __user *ptr = *ptrp;
4042 if (put_user(cmd, (uint32_t __user *)ptr))
4044 ptr += sizeof(uint32_t);
4046 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4048 ptr += sizeof(binder_uintptr_t);
4050 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4052 ptr += sizeof(binder_uintptr_t);
4054 binder_stat_br(proc, thread, cmd);
4055 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4056 proc->pid, thread->pid, cmd_name, node_debug_id,
4057 (u64)node_ptr, (u64)node_cookie);
4063 static int binder_wait_for_work(struct binder_thread *thread,
4067 struct binder_proc *proc = thread->proc;
4070 freezer_do_not_count();
4071 binder_inner_proc_lock(proc);
4073 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4074 if (binder_has_work_ilocked(thread, do_proc_work))
4077 list_add(&thread->waiting_thread_node,
4078 &proc->waiting_threads);
4079 binder_inner_proc_unlock(proc);
4081 binder_inner_proc_lock(proc);
4082 list_del_init(&thread->waiting_thread_node);
4083 if (signal_pending(current)) {
4088 finish_wait(&thread->wait, &wait);
4089 binder_inner_proc_unlock(proc);
4096 * binder_apply_fd_fixups() - finish fd translation
4097 * @proc: binder_proc associated with @t->buffer
4098 * @t: binder transaction with list of fd fixups
4100 * Now that we are in the context of the transaction target
4101 * process, we can allocate and install fds. Process the
4102 * list of fds to translate and fix up the buffer with the new fds.
4105 * If we fail to allocate an fd, then free the resources by
4106 * fput'ing files that have not been processed and ksys_close'ing
4107 * any fds that have already been allocated.
4109 static int binder_apply_fd_fixups(struct binder_proc *proc,
4110 struct binder_transaction *t)
4112 struct binder_txn_fd_fixup *fixup, *tmp;
4115 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4116 int fd = get_unused_fd_flags(O_CLOEXEC);
4119 binder_debug(BINDER_DEBUG_TRANSACTION,
4120 "failed fd fixup txn %d fd %d\n",
4125 binder_debug(BINDER_DEBUG_TRANSACTION,
4126 "fd fixup txn %d fd %d\n",
4128 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4129 fd_install(fd, fixup->file);
4131 binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4135 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4141 binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4142 t->buffer, fixup->offset,
4144 binder_deferred_fd_close(fd);
4146 list_del(&fixup->fixup_entry);
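/*
 * Usage sketch (editor's note): called from binder_thread_read()
 * while delivering a BINDER_WORK_TRANSACTION. On failure the caller
 * abandons the transaction: fds already installed are closed via
 * binder_deferred_fd_close() in the cleanup loop above, files not
 * yet processed are released, and the caller then frees t->buffer
 * and cleans up t:
 *
 *	ret = binder_apply_fd_fixups(proc, t);
 *	if (ret)
 *		... free t->buffer and clean up t ...
 */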
4153 static int binder_thread_read(struct binder_proc *proc,
4154 struct binder_thread *thread,
4155 binder_uintptr_t binder_buffer, size_t size,
4156 binder_size_t *consumed, int non_block)
4158 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4159 void __user *ptr = buffer + *consumed;
4160 void __user *end = buffer + size;
4163 int wait_for_proc_work;
4165 if (*consumed == 0) {
4166 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4168 ptr += sizeof(uint32_t);
4172 binder_inner_proc_lock(proc);
4173 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4174 binder_inner_proc_unlock(proc);
4176 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4178 trace_binder_wait_for_work(wait_for_proc_work,
4179 !!thread->transaction_stack,
4180 !binder_worklist_empty(proc, &thread->todo));
4181 if (wait_for_proc_work) {
4182 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4183 BINDER_LOOPER_STATE_ENTERED))) {
4184 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4185 proc->pid, thread->pid, thread->looper);
4186 wait_event_interruptible(binder_user_error_wait,
4187 binder_stop_on_user_error < 2);
4189 binder_set_nice(proc->default_priority);
4193 if (!binder_has_work(thread, wait_for_proc_work))
4196 ret = binder_wait_for_work(thread, wait_for_proc_work);
4199 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4206 struct binder_transaction_data_secctx tr;
4207 struct binder_transaction_data *trd = &tr.transaction_data;
4208 struct binder_work *w = NULL;
4209 struct list_head *list = NULL;
4210 struct binder_transaction *t = NULL;
4211 struct binder_thread *t_from;
4212 size_t trsize = sizeof(*trd);
4214 binder_inner_proc_lock(proc);
4215 if (!binder_worklist_empty_ilocked(&thread->todo))
4216 list = &thread->todo;
4217 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4221 binder_inner_proc_unlock(proc);
4224 if (ptr - buffer == 4 && !thread->looper_need_return)
4229 if (end - ptr < sizeof(tr) + 4) {
4230 binder_inner_proc_unlock(proc);
4233 w = binder_dequeue_work_head_ilocked(list);
4234 if (binder_worklist_empty_ilocked(&thread->todo))
4235 thread->process_todo = false;
4238 case BINDER_WORK_TRANSACTION: {
4239 binder_inner_proc_unlock(proc);
4240 t = container_of(w, struct binder_transaction, work);
4242 case BINDER_WORK_RETURN_ERROR: {
4243 struct binder_error *e = container_of(
4244 w, struct binder_error, work);
4246 WARN_ON(e->cmd == BR_OK);
4247 binder_inner_proc_unlock(proc);
4248 if (put_user(e->cmd, (uint32_t __user *)ptr))
4252 ptr += sizeof(uint32_t);
4254 binder_stat_br(proc, thread, cmd);
4256 case BINDER_WORK_TRANSACTION_COMPLETE: {
4257 binder_inner_proc_unlock(proc);
4258 cmd = BR_TRANSACTION_COMPLETE;
4259 if (put_user(cmd, (uint32_t __user *)ptr))
4261 ptr += sizeof(uint32_t);
4263 binder_stat_br(proc, thread, cmd);
4264 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4265 "%d:%d BR_TRANSACTION_COMPLETE\n",
4266 proc->pid, thread->pid);
4268 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4270 case BINDER_WORK_NODE: {
4271 struct binder_node *node = container_of(w, struct binder_node, work);
4273 binder_uintptr_t node_ptr = node->ptr;
4274 binder_uintptr_t node_cookie = node->cookie;
4275 int node_debug_id = node->debug_id;
4278 void __user *orig_ptr = ptr;
4280 BUG_ON(proc != node->proc);
4281 strong = node->internal_strong_refs ||
4282 node->local_strong_refs;
4283 weak = !hlist_empty(&node->refs) ||
4284 node->local_weak_refs ||
4285 node->tmp_refs || strong;
4286 has_strong_ref = node->has_strong_ref;
4287 has_weak_ref = node->has_weak_ref;
4289 if (weak && !has_weak_ref) {
4290 node->has_weak_ref = 1;
4291 node->pending_weak_ref = 1;
4292 node->local_weak_refs++;
4294 if (strong && !has_strong_ref) {
4295 node->has_strong_ref = 1;
4296 node->pending_strong_ref = 1;
4297 node->local_strong_refs++;
4299 if (!strong && has_strong_ref)
4300 node->has_strong_ref = 0;
4301 if (!weak && has_weak_ref)
4302 node->has_weak_ref = 0;
4303 if (!weak && !strong) {
4304 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4305 "%d:%d node %d u%016llx c%016llx deleted\n",
4306 proc->pid, thread->pid,
4310 rb_erase(&node->rb_node, &proc->nodes);
4311 binder_inner_proc_unlock(proc);
4312 binder_node_lock(node);
4313 /*
4314 * Acquire the node lock before freeing the
4315 * node to serialize with other threads that
4316 * may have been holding the node lock while
4317 * decrementing this node (avoids race where
4318 * this thread frees while the other thread
4319 * is unlocking the node after the final
4320 * decrement)
4321 */
4322 binder_node_unlock(node);
4323 binder_free_node(node);
4325 binder_inner_proc_unlock(proc);
4327 if (weak && !has_weak_ref)
4328 ret = binder_put_node_cmd(
4329 proc, thread, &ptr, node_ptr,
4330 node_cookie, node_debug_id,
4331 BR_INCREFS, "BR_INCREFS");
4332 if (!ret && strong && !has_strong_ref)
4333 ret = binder_put_node_cmd(
4334 proc, thread, &ptr, node_ptr,
4335 node_cookie, node_debug_id,
4336 BR_ACQUIRE, "BR_ACQUIRE");
4337 if (!ret && !strong && has_strong_ref)
4338 ret = binder_put_node_cmd(
4339 proc, thread, &ptr, node_ptr,
4340 node_cookie, node_debug_id,
4341 BR_RELEASE, "BR_RELEASE");
4342 if (!ret && !weak && has_weak_ref)
4343 ret = binder_put_node_cmd(
4344 proc, thread, &ptr, node_ptr,
4345 node_cookie, node_debug_id,
4346 BR_DECREFS, "BR_DECREFS");
4347 if (orig_ptr == ptr)
4348 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4349 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4350 proc->pid, thread->pid,
4357 case BINDER_WORK_DEAD_BINDER:
4358 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4359 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4360 struct binder_ref_death *death;
4362 binder_uintptr_t cookie;
4364 death = container_of(w, struct binder_ref_death, work);
4365 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4366 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4368 cmd = BR_DEAD_BINDER;
4369 cookie = death->cookie;
4371 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4372 "%d:%d %s %016llx\n",
4373 proc->pid, thread->pid,
4374 cmd == BR_DEAD_BINDER ?
4376 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4378 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4379 binder_inner_proc_unlock(proc);
4381 binder_stats_deleted(BINDER_STAT_DEATH);
4383 binder_enqueue_work_ilocked(
4384 w, &proc->delivered_death);
4385 binder_inner_proc_unlock(proc);
4387 if (put_user(cmd, (uint32_t __user *)ptr))
4389 ptr += sizeof(uint32_t);
4390 if (put_user(cookie,
4391 (binder_uintptr_t __user *)ptr))
4393 ptr += sizeof(binder_uintptr_t);
4394 binder_stat_br(proc, thread, cmd);
4395 if (cmd == BR_DEAD_BINDER)
4396 goto done; /* DEAD_BINDER notifications can cause transactions */
4399 binder_inner_proc_unlock(proc);
4400 pr_err("%d:%d: bad work type %d\n",
4401 proc->pid, thread->pid, w->type);
4408 BUG_ON(t->buffer == NULL);
4409 if (t->buffer->target_node) {
4410 struct binder_node *target_node = t->buffer->target_node;
4412 trd->target.ptr = target_node->ptr;
4413 trd->cookie = target_node->cookie;
4414 t->saved_priority = task_nice(current);
4415 if (t->priority < target_node->min_priority &&
4416 !(t->flags & TF_ONE_WAY))
4417 binder_set_nice(t->priority);
4418 else if (!(t->flags & TF_ONE_WAY) ||
4419 t->saved_priority > target_node->min_priority)
4420 binder_set_nice(target_node->min_priority);
4421 cmd = BR_TRANSACTION;
4423 trd->target.ptr = 0;
4427 trd->code = t->code;
4428 trd->flags = t->flags;
4429 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4431 t_from = binder_get_txn_from(t);
4433 struct task_struct *sender = t_from->proc->tsk;
4436 task_tgid_nr_ns(sender,
4437 task_active_pid_ns(current));
4439 trd->sender_pid = 0;
4442 ret = binder_apply_fd_fixups(proc, t);
4444 struct binder_buffer *buffer = t->buffer;
4445 bool oneway = !!(t->flags & TF_ONE_WAY);
4446 int tid = t->debug_id;
4449 binder_thread_dec_tmpref(t_from);
4450 buffer->transaction = NULL;
4451 binder_cleanup_transaction(t, "fd fixups failed",
4453 binder_free_buf(proc, buffer);
4454 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4455 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4456 proc->pid, thread->pid,
4458 (cmd == BR_REPLY ? "reply " : ""),
4459 tid, BR_FAILED_REPLY, ret, __LINE__);
4460 if (cmd == BR_REPLY) {
4461 cmd = BR_FAILED_REPLY;
4462 if (put_user(cmd, (uint32_t __user *)ptr))
4464 ptr += sizeof(uint32_t);
4465 binder_stat_br(proc, thread, cmd);
4470 trd->data_size = t->buffer->data_size;
4471 trd->offsets_size = t->buffer->offsets_size;
4472 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4473 trd->data.ptr.offsets = trd->data.ptr.buffer +
4474 ALIGN(t->buffer->data_size,
4477 tr.secctx = t->security_ctx;
4478 if (t->security_ctx) {
4479 cmd = BR_TRANSACTION_SEC_CTX;
4480 trsize = sizeof(tr);
4482 if (put_user(cmd, (uint32_t __user *)ptr)) {
4484 binder_thread_dec_tmpref(t_from);
4486 binder_cleanup_transaction(t, "put_user failed",
4491 ptr += sizeof(uint32_t);
4492 if (copy_to_user(ptr, &tr, trsize)) {
4494 binder_thread_dec_tmpref(t_from);
4496 binder_cleanup_transaction(t, "copy_to_user failed",
4503 trace_binder_transaction_received(t);
4504 binder_stat_br(proc, thread, cmd);
4505 binder_debug(BINDER_DEBUG_TRANSACTION,
4506 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4507 proc->pid, thread->pid,
4508 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4509 (cmd == BR_TRANSACTION_SEC_CTX) ?
4510 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4511 t->debug_id, t_from ? t_from->proc->pid : 0,
4512 t_from ? t_from->pid : 0, cmd,
4513 t->buffer->data_size, t->buffer->offsets_size,
4514 (u64)trd->data.ptr.buffer,
4515 (u64)trd->data.ptr.offsets);
4518 binder_thread_dec_tmpref(t_from);
4519 t->buffer->allow_user_free = 1;
4520 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4521 binder_inner_proc_lock(thread->proc);
4522 t->to_parent = thread->transaction_stack;
4523 t->to_thread = thread;
4524 thread->transaction_stack = t;
4525 binder_inner_proc_unlock(thread->proc);
4527 binder_free_transaction(t);
4534 *consumed = ptr - buffer;
4535 binder_inner_proc_lock(proc);
4536 if (proc->requested_threads == 0 &&
4537 list_empty(&thread->proc->waiting_threads) &&
4538 proc->requested_threads_started < proc->max_threads &&
4539 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4540 BINDER_LOOPER_STATE_ENTERED))
4541 /* the user-space code fails to spawn a new thread if we leave this out */) {
4542 proc->requested_threads++;
4543 binder_inner_proc_unlock(proc);
4544 binder_debug(BINDER_DEBUG_THREADS,
4545 "%d:%d BR_SPAWN_LOOPER\n",
4546 proc->pid, thread->pid);
4547 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4549 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4550 } else
4551 binder_inner_proc_unlock(proc);
4552 return 0;
4553 }
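/*
* Editorial sketch (not driver code): a hedged view of how a user-space
* thread pool is expected to answer BR_SPAWN_LOOPER. The pool spawns a
* thread which registers itself before entering its read loop:
*
*	uint32_t cmd = BC_REGISTER_LOOPER;  // spawned threads
*	// (the pool's own main thread uses BC_ENTER_LOOPER instead)
*	// ...write cmd via BINDER_WRITE_READ, then loop reading BR_*...
*
* That is why the test above requires REGISTERED|ENTERED before a
* thread counts as a looper, and why BR_SPAWN_LOOPER is written over
* the BR_NOOP at the start of the read buffer.
*/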
4555 static void binder_release_work(struct binder_proc *proc,
4556 struct list_head *list)
4558 struct binder_work *w;
4561 w = binder_dequeue_work_head(proc, list);
4566 case BINDER_WORK_TRANSACTION: {
4567 struct binder_transaction *t;
4569 t = container_of(w, struct binder_transaction, work);
4571 binder_cleanup_transaction(t, "process died.",
4574 case BINDER_WORK_RETURN_ERROR: {
4575 struct binder_error *e = container_of(
4576 w, struct binder_error, work);
4578 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4579 "undelivered TRANSACTION_ERROR: %u\n",
4582 case BINDER_WORK_TRANSACTION_COMPLETE: {
4583 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4584 "undelivered TRANSACTION_COMPLETE\n");
4586 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4588 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4589 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4590 struct binder_ref_death *death;
4592 death = container_of(w, struct binder_ref_death, work);
4593 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4594 "undelivered death notification, %016llx\n",
4595 (u64)death->cookie);
4597 binder_stats_deleted(BINDER_STAT_DEATH);
4600 pr_err("unexpected work type, %d, not freed\n",
4608 static struct binder_thread *binder_get_thread_ilocked(
4609 struct binder_proc *proc, struct binder_thread *new_thread)
4611 struct binder_thread *thread = NULL;
4612 struct rb_node *parent = NULL;
4613 struct rb_node **p = &proc->threads.rb_node;
4617 thread = rb_entry(parent, struct binder_thread, rb_node);
4619 if (current->pid < thread->pid)
4621 else if (current->pid > thread->pid)
4622 p = &(*p)->rb_right;
4628 thread = new_thread;
4629 binder_stats_created(BINDER_STAT_THREAD);
4630 thread->proc = proc;
4631 thread->pid = current->pid;
4632 atomic_set(&thread->tmp_ref, 0);
4633 init_waitqueue_head(&thread->wait);
4634 INIT_LIST_HEAD(&thread->todo);
4635 rb_link_node(&thread->rb_node, parent, p);
4636 rb_insert_color(&thread->rb_node, &proc->threads);
4637 thread->looper_need_return = true;
4638 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4639 thread->return_error.cmd = BR_OK;
4640 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4641 thread->reply_error.cmd = BR_OK;
4642 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4646 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4648 struct binder_thread *thread;
4649 struct binder_thread *new_thread;
4651 binder_inner_proc_lock(proc);
4652 thread = binder_get_thread_ilocked(proc, NULL);
4653 binder_inner_proc_unlock(proc);
4655 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4656 if (new_thread == NULL)
4658 binder_inner_proc_lock(proc);
4659 thread = binder_get_thread_ilocked(proc, new_thread);
4660 binder_inner_proc_unlock(proc);
4661 if (thread != new_thread)
4662 kfree(new_thread);
4663 return thread;
4664 }
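/*
* Editorial note (not driver code): binder_get_thread() uses the common
* "lookup, allocate outside the lock, retry the insert" pattern so that
* kzalloc(GFP_KERNEL), which may sleep, never runs under the inner
* spinlock:
*
*	lock(); obj = find(); unlock();
*	if (obj) return obj;
*	new = kzalloc(...);             // no locks held
*	lock(); obj = find_or_insert(new); unlock();
*	if (obj != new)
*		kfree(new);             // lost the race; drop the spare
*
* The kfree() above is exactly that lost-the-race branch.
*/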
4667 static void binder_free_proc(struct binder_proc *proc)
4669 BUG_ON(!list_empty(&proc->todo));
4670 BUG_ON(!list_empty(&proc->delivered_death));
4671 binder_alloc_deferred_release(&proc->alloc);
4672 put_task_struct(proc->tsk);
4673 binder_stats_deleted(BINDER_STAT_PROC);
4677 static void binder_free_thread(struct binder_thread *thread)
4679 BUG_ON(!list_empty(&thread->todo));
4680 binder_stats_deleted(BINDER_STAT_THREAD);
4681 binder_proc_dec_tmpref(thread->proc);
4685 static int binder_thread_release(struct binder_proc *proc,
4686 struct binder_thread *thread)
4688 struct binder_transaction *t;
4689 struct binder_transaction *send_reply = NULL;
4690 int active_transactions = 0;
4691 struct binder_transaction *last_t = NULL;
4693 binder_inner_proc_lock(thread->proc);
4694 /*
4695 * take a ref on the proc so it survives
4696 * after we remove this thread from proc->threads.
4697 * The corresponding dec is when we actually
4698 * free the thread in binder_free_thread()
4699 */
4700 proc->tmp_ref++;
4701 /*
4702 * take a ref on this thread to ensure it
4703 * survives while we are releasing it
4704 */
4705 atomic_inc(&thread->tmp_ref);
4706 rb_erase(&thread->rb_node, &proc->threads);
4707 t = thread->transaction_stack;
4709 spin_lock(&t->lock);
4710 if (t->to_thread == thread)
4713 __acquire(&t->lock);
4715 thread->is_dead = true;
4719 active_transactions++;
4720 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4721 "release %d:%d transaction %d %s, still active\n",
4722 proc->pid, thread->pid,
4724 (t->to_thread == thread) ? "in" : "out");
4726 if (t->to_thread == thread) {
4728 t->to_thread = NULL;
4730 t->buffer->transaction = NULL;
4734 } else if (t->from == thread) {
4739 spin_unlock(&last_t->lock);
4741 spin_lock(&t->lock);
4743 __acquire(&t->lock);
4745 /* annotation for sparse, lock not acquired in last iteration above */
4746 __release(&t->lock);
4748 /*
4749 * If this thread used poll, make sure we remove the waitqueue
4750 * from any epoll data structures holding it with POLLFREE.
4751 * waitqueue_active() is safe to use here because we're holding
4752 * the inner lock.
4753 */
4754 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4755 waitqueue_active(&thread->wait)) {
4756 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4757 }
4759 binder_inner_proc_unlock(thread->proc);
4761 /*
4762 * This is needed to avoid races between wake_up_poll() above and
4763 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4764 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4765 * lock, so we can be sure it's done after calling synchronize_rcu().
4766 */
4767 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4768 synchronize_rcu();
4771 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4772 binder_release_work(proc, &thread->todo);
4773 binder_thread_dec_tmpref(thread);
4774 return active_transactions;
4777 static __poll_t binder_poll(struct file *filp,
4778 struct poll_table_struct *wait)
4780 struct binder_proc *proc = filp->private_data;
4781 struct binder_thread *thread = NULL;
4782 bool wait_for_proc_work;
4784 thread = binder_get_thread(proc);
4785 if (!thread)
4786 return EPOLLERR;
4788 binder_inner_proc_lock(thread->proc);
4789 thread->looper |= BINDER_LOOPER_STATE_POLL;
4790 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4792 binder_inner_proc_unlock(thread->proc);
4794 poll_wait(filp, &thread->wait, wait);
4796 if (binder_has_work(thread, wait_for_proc_work))
4797 return EPOLLIN;
4799 return 0;
4800 }
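/*
* Editorial sketch (not driver code): a hypothetical user-space caller
* multiplexing a binder fd; EPOLLIN here means the polling thread has
* binder work to read via BINDER_WRITE_READ:
*
*	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
*
*	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
*		; // now issue ioctl(binder_fd, BINDER_WRITE_READ, &bwr)
*
* Since the driver keeps per-thread state, the thread that polls should
* also be the one that reads.
*/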
4802 static int binder_ioctl_write_read(struct file *filp,
4803 unsigned int cmd, unsigned long arg,
4804 struct binder_thread *thread)
4807 struct binder_proc *proc = filp->private_data;
4808 unsigned int size = _IOC_SIZE(cmd);
4809 void __user *ubuf = (void __user *)arg;
4810 struct binder_write_read bwr;
4812 if (size != sizeof(struct binder_write_read)) {
4816 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4820 binder_debug(BINDER_DEBUG_READ_WRITE,
4821 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4822 proc->pid, thread->pid,
4823 (u64)bwr.write_size, (u64)bwr.write_buffer,
4824 (u64)bwr.read_size, (u64)bwr.read_buffer);
4826 if (bwr.write_size > 0) {
4827 ret = binder_thread_write(proc, thread,
4830 &bwr.write_consumed);
4831 trace_binder_write_done(ret);
4833 bwr.read_consumed = 0;
4834 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4839 if (bwr.read_size > 0) {
4840 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4843 filp->f_flags & O_NONBLOCK);
4844 trace_binder_read_done(ret);
4845 binder_inner_proc_lock(proc);
4846 if (!binder_worklist_empty_ilocked(&proc->todo))
4847 binder_wakeup_proc_ilocked(proc);
4848 binder_inner_proc_unlock(proc);
4850 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4855 binder_debug(BINDER_DEBUG_READ_WRITE,
4856 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4857 proc->pid, thread->pid,
4858 (u64)bwr.write_consumed, (u64)bwr.write_size,
4859 (u64)bwr.read_consumed, (u64)bwr.read_size);
4860 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4861 ret = -EFAULT;
4862 goto out;
4863 }
4864 out:
4865 return ret;
4866 }
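/*
* Editorial sketch (not driver code): minimal user-space use of
* BINDER_WRITE_READ, assuming an open binder_fd. The consumed counters
* written back let a caller resume a partially drained buffer:
*
*	struct binder_write_read bwr = {0};
*	uint32_t readbuf[32];
*
*	bwr.read_size = sizeof(readbuf);
*	bwr.read_buffer = (binder_uintptr_t)readbuf;
*	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
*		; // handle errno
*	// parse bwr.read_consumed bytes of BR_* commands from readbuf
*/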
4868 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4869 struct flat_binder_object *fbo)
4872 struct binder_proc *proc = filp->private_data;
4873 struct binder_context *context = proc->context;
4874 struct binder_node *new_node;
4875 kuid_t curr_euid = current_euid();
4877 mutex_lock(&context->context_mgr_node_lock);
4878 if (context->binder_context_mgr_node) {
4879 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4883 ret = security_binder_set_context_mgr(proc->tsk);
4886 if (uid_valid(context->binder_context_mgr_uid)) {
4887 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4888 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4889 from_kuid(&init_user_ns, curr_euid),
4890 from_kuid(&init_user_ns,
4891 context->binder_context_mgr_uid));
4896 context->binder_context_mgr_uid = curr_euid;
4898 new_node = binder_new_node(proc, fbo);
4903 binder_node_lock(new_node);
4904 new_node->local_weak_refs++;
4905 new_node->local_strong_refs++;
4906 new_node->has_strong_ref = 1;
4907 new_node->has_weak_ref = 1;
4908 context->binder_context_mgr_node = new_node;
4909 binder_node_unlock(new_node);
4910 binder_put_node(new_node);
4912 mutex_unlock(&context->context_mgr_node_lock);
4913 return ret;
4914 }
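/*
* Editorial note (not driver code): one process per context may become
* context manager (historically servicemanager); afterwards clients
* reach it through the reserved handle 0 (tr.target.handle = 0). A
* hedged sketch of the registering calls:
*
*	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
*	// or, to receive security contexts with transactions:
*	struct flat_binder_object obj = {0};
*	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
*/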
4916 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4917 struct binder_node_info_for_ref *info)
4919 struct binder_node *node;
4920 struct binder_context *context = proc->context;
4921 __u32 handle = info->handle;
4923 if (info->strong_count || info->weak_count || info->reserved1 ||
4924 info->reserved2 || info->reserved3) {
4925 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4930 /* This ioctl may only be used by the context manager */
4931 mutex_lock(&context->context_mgr_node_lock);
4932 if (!context->binder_context_mgr_node ||
4933 context->binder_context_mgr_node->proc != proc) {
4934 mutex_unlock(&context->context_mgr_node_lock);
4937 mutex_unlock(&context->context_mgr_node_lock);
4939 node = binder_get_node_from_ref(proc, handle, true, NULL);
4943 info->strong_count = node->local_strong_refs +
4944 node->internal_strong_refs;
4945 info->weak_count = node->local_weak_refs;
4947 binder_put_node(node);
4952 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4953 struct binder_node_debug_info *info)
4956 binder_uintptr_t ptr = info->ptr;
4958 memset(info, 0, sizeof(*info));
4960 binder_inner_proc_lock(proc);
4961 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4962 struct binder_node *node = rb_entry(n, struct binder_node,
4964 if (node->ptr > ptr) {
4965 info->ptr = node->ptr;
4966 info->cookie = node->cookie;
4967 info->has_strong_ref = node->has_strong_ref;
4968 info->has_weak_ref = node->has_weak_ref;
4972 binder_inner_proc_unlock(proc);
4977 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4980 struct binder_proc *proc = filp->private_data;
4981 struct binder_thread *thread;
4982 unsigned int size = _IOC_SIZE(cmd);
4983 void __user *ubuf = (void __user *)arg;
4985 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4986 proc->pid, current->pid, cmd, arg);*/
4988 binder_selftest_alloc(&proc->alloc);
4990 trace_binder_ioctl(cmd, arg);
4992 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4996 thread = binder_get_thread(proc);
4997 if (thread == NULL) {
5003 case BINDER_WRITE_READ:
5004 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5008 case BINDER_SET_MAX_THREADS: {
5011 if (copy_from_user(&max_threads, ubuf,
5012 sizeof(max_threads))) {
5016 binder_inner_proc_lock(proc);
5017 proc->max_threads = max_threads;
5018 binder_inner_proc_unlock(proc);
5021 case BINDER_SET_CONTEXT_MGR_EXT: {
5022 struct flat_binder_object fbo;
5024 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5028 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5033 case BINDER_SET_CONTEXT_MGR:
5034 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5038 case BINDER_THREAD_EXIT:
5039 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5040 proc->pid, thread->pid);
5041 binder_thread_release(proc, thread);
5044 case BINDER_VERSION: {
5045 struct binder_version __user *ver = ubuf;
5047 if (size != sizeof(struct binder_version)) {
5051 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5052 &ver->protocol_version)) {
5058 case BINDER_GET_NODE_INFO_FOR_REF: {
5059 struct binder_node_info_for_ref info;
5061 if (copy_from_user(&info, ubuf, sizeof(info))) {
5066 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5070 if (copy_to_user(ubuf, &info, sizeof(info))) {
5077 case BINDER_GET_NODE_DEBUG_INFO: {
5078 struct binder_node_debug_info info;
5080 if (copy_from_user(&info, ubuf, sizeof(info))) {
5085 ret = binder_ioctl_get_node_debug_info(proc, &info);
5089 if (copy_to_user(ubuf, &info, sizeof(info))) {
5102 thread->looper_need_return = false;
5103 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5104 if (ret && ret != -ERESTARTSYS)
5105 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5107 trace_binder_ioctl_done(ret);
5111 static void binder_vma_open(struct vm_area_struct *vma)
5113 struct binder_proc *proc = vma->vm_private_data;
5115 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5116 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5117 proc->pid, vma->vm_start, vma->vm_end,
5118 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5119 (unsigned long)pgprot_val(vma->vm_page_prot));
5122 static void binder_vma_close(struct vm_area_struct *vma)
5124 struct binder_proc *proc = vma->vm_private_data;
5126 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5127 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5128 proc->pid, vma->vm_start, vma->vm_end,
5129 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5130 (unsigned long)pgprot_val(vma->vm_page_prot));
5131 binder_alloc_vma_close(&proc->alloc);
5134 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5136 return VM_FAULT_SIGBUS;
5139 static const struct vm_operations_struct binder_vm_ops = {
5140 .open = binder_vma_open,
5141 .close = binder_vma_close,
5142 .fault = binder_vm_fault,
5145 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5148 struct binder_proc *proc = filp->private_data;
5149 const char *failure_string;
5151 if (proc->tsk != current->group_leader)
5152 return -EINVAL;
5154 if ((vma->vm_end - vma->vm_start) > SZ_4M)
5155 vma->vm_end = vma->vm_start + SZ_4M;
5157 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5158 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5159 __func__, proc->pid, vma->vm_start, vma->vm_end,
5160 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5161 (unsigned long)pgprot_val(vma->vm_page_prot));
5163 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5165 failure_string = "bad vm_flags";
5168 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5169 vma->vm_flags &= ~VM_MAYWRITE;
5171 vma->vm_ops = &binder_vm_ops;
5172 vma->vm_private_data = proc;
5174 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5180 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5181 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5182 return ret;
5183 }
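/*
* Editorial sketch (not driver code): the expected user-space pairing of
* open() and a read-only mapping; VM_WRITE is in FORBIDDEN_MMAP_FLAGS
* and sizes are clamped to SZ_4M above:
*
*	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
*	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
*
* The driver copies transaction payloads into this mapping; user space
* only reads it and returns buffers with BC_FREE_BUFFER.
*/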
5185 static int binder_open(struct inode *nodp, struct file *filp)
5187 struct binder_proc *proc;
5188 struct binder_device *binder_dev;
5190 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5191 current->group_leader->pid, current->pid);
5193 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5196 spin_lock_init(&proc->inner_lock);
5197 spin_lock_init(&proc->outer_lock);
5198 get_task_struct(current->group_leader);
5199 proc->tsk = current->group_leader;
5200 INIT_LIST_HEAD(&proc->todo);
5201 proc->default_priority = task_nice(current);
5202 /* binderfs stashes devices in i_private */
5203 if (is_binderfs_device(nodp))
5204 binder_dev = nodp->i_private;
5206 binder_dev = container_of(filp->private_data,
5207 struct binder_device, miscdev);
5208 proc->context = &binder_dev->context;
5209 binder_alloc_init(&proc->alloc);
5211 binder_stats_created(BINDER_STAT_PROC);
5212 proc->pid = current->group_leader->pid;
5213 INIT_LIST_HEAD(&proc->delivered_death);
5214 INIT_LIST_HEAD(&proc->waiting_threads);
5215 filp->private_data = proc;
5217 mutex_lock(&binder_procs_lock);
5218 hlist_add_head(&proc->proc_node, &binder_procs);
5219 mutex_unlock(&binder_procs_lock);
5221 if (binder_debugfs_dir_entry_proc) {
5224 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5226 * proc debug entries are shared between contexts, so
5227 * this will fail if the process tries to open the driver
5228 * again with a different context. The printing code will
5229 * anyway print all contexts that a given PID has, so this
5230 * is not a problem.
5231 */
5232 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5233 binder_debugfs_dir_entry_proc,
5234 (void *)(unsigned long)proc->pid,
5241 static int binder_flush(struct file *filp, fl_owner_t id)
5243 struct binder_proc *proc = filp->private_data;
5245 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5250 static void binder_deferred_flush(struct binder_proc *proc)
5255 binder_inner_proc_lock(proc);
5256 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5257 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5259 thread->looper_need_return = true;
5260 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5261 wake_up_interruptible(&thread->wait);
5265 binder_inner_proc_unlock(proc);
5267 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5268 "binder_flush: %d woke %d threads\n", proc->pid,
5272 static int binder_release(struct inode *nodp, struct file *filp)
5274 struct binder_proc *proc = filp->private_data;
5276 debugfs_remove(proc->debugfs_entry);
5277 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5282 static int binder_node_release(struct binder_node *node, int refs)
5284 struct binder_ref *ref;
5285 int death = 0;
5286 struct binder_proc *proc = node->proc;
5288 binder_release_work(proc, &node->async_todo);
5290 binder_node_lock(node);
5291 binder_inner_proc_lock(proc);
5292 binder_dequeue_work_ilocked(&node->work);
5294 * The caller must have taken a temporary ref on the node.
5295 */
5296 BUG_ON(!node->tmp_refs);
5297 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5298 binder_inner_proc_unlock(proc);
5299 binder_node_unlock(node);
5300 binder_free_node(node);
5306 node->local_strong_refs = 0;
5307 node->local_weak_refs = 0;
5308 binder_inner_proc_unlock(proc);
5310 spin_lock(&binder_dead_nodes_lock);
5311 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5312 spin_unlock(&binder_dead_nodes_lock);
5314 hlist_for_each_entry(ref, &node->refs, node_entry) {
5315 refs++;
5316 /*
5317 * Need the node lock to synchronize
5318 * with new notification requests and the
5319 * inner lock to synchronize with queued
5320 * death notifications.
5321 */
5322 binder_inner_proc_lock(ref->proc);
5323 if (!ref->death) {
5324 binder_inner_proc_unlock(ref->proc);
5325 continue;
5326 }
5328 death++;
5330 BUG_ON(!list_empty(&ref->death->work.entry));
5331 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5332 binder_enqueue_work_ilocked(&ref->death->work,
5334 binder_wakeup_proc_ilocked(ref->proc);
5335 binder_inner_proc_unlock(ref->proc);
5338 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5339 "node %d now dead, refs %d, death %d\n",
5340 node->debug_id, refs, death);
5341 binder_node_unlock(node);
5342 binder_put_node(node);
5347 static void binder_deferred_release(struct binder_proc *proc)
5349 struct binder_context *context = proc->context;
5351 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5353 mutex_lock(&binder_procs_lock);
5354 hlist_del(&proc->proc_node);
5355 mutex_unlock(&binder_procs_lock);
5357 mutex_lock(&context->context_mgr_node_lock);
5358 if (context->binder_context_mgr_node &&
5359 context->binder_context_mgr_node->proc == proc) {
5360 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5361 "%s: %d context_mgr_node gone\n",
5362 __func__, proc->pid);
5363 context->binder_context_mgr_node = NULL;
5365 mutex_unlock(&context->context_mgr_node_lock);
5366 binder_inner_proc_lock(proc);
5367 /*
5368 * Make sure proc stays alive after we
5369 * remove all the threads
5370 */
5371 proc->tmp_ref++;
5373 proc->is_dead = true;
5375 active_transactions = 0;
5376 while ((n = rb_first(&proc->threads))) {
5377 struct binder_thread *thread;
5379 thread = rb_entry(n, struct binder_thread, rb_node);
5380 binder_inner_proc_unlock(proc);
5382 active_transactions += binder_thread_release(proc, thread);
5383 binder_inner_proc_lock(proc);
5388 while ((n = rb_first(&proc->nodes))) {
5389 struct binder_node *node;
5391 node = rb_entry(n, struct binder_node, rb_node);
5394 * take a temporary ref on the node before
5395 * calling binder_node_release() which will either
5396 * kfree() the node or call binder_put_node()
5398 binder_inc_node_tmpref_ilocked(node);
5399 rb_erase(&node->rb_node, &proc->nodes);
5400 binder_inner_proc_unlock(proc);
5401 incoming_refs = binder_node_release(node, incoming_refs);
5402 binder_inner_proc_lock(proc);
5404 binder_inner_proc_unlock(proc);
5407 binder_proc_lock(proc);
5408 while ((n = rb_first(&proc->refs_by_desc))) {
5409 struct binder_ref *ref;
5411 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5413 binder_cleanup_ref_olocked(ref);
5414 binder_proc_unlock(proc);
5415 binder_free_ref(ref);
5416 binder_proc_lock(proc);
5418 binder_proc_unlock(proc);
5420 binder_release_work(proc, &proc->todo);
5421 binder_release_work(proc, &proc->delivered_death);
5423 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5424 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5425 __func__, proc->pid, threads, nodes, incoming_refs,
5426 outgoing_refs, active_transactions);
5428 binder_proc_dec_tmpref(proc);
5431 static void binder_deferred_func(struct work_struct *work)
5433 struct binder_proc *proc;
5438 mutex_lock(&binder_deferred_lock);
5439 if (!hlist_empty(&binder_deferred_list)) {
5440 proc = hlist_entry(binder_deferred_list.first,
5441 struct binder_proc, deferred_work_node);
5442 hlist_del_init(&proc->deferred_work_node);
5443 defer = proc->deferred_work;
5444 proc->deferred_work = 0;
5449 mutex_unlock(&binder_deferred_lock);
5451 if (defer & BINDER_DEFERRED_FLUSH)
5452 binder_deferred_flush(proc);
5454 if (defer & BINDER_DEFERRED_RELEASE)
5455 binder_deferred_release(proc); /* frees proc */
5458 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5461 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5463 mutex_lock(&binder_deferred_lock);
5464 proc->deferred_work |= defer;
5465 if (hlist_unhashed(&proc->deferred_work_node)) {
5466 hlist_add_head(&proc->deferred_work_node,
5467 &binder_deferred_list);
5468 schedule_work(&binder_deferred_work);
5470 mutex_unlock(&binder_deferred_lock);
5473 static void print_binder_transaction_ilocked(struct seq_file *m,
5474 struct binder_proc *proc,
5476 struct binder_transaction *t)
5478 struct binder_proc *to_proc;
5479 struct binder_buffer *buffer = t->buffer;
5481 spin_lock(&t->lock);
5482 to_proc = t->to_proc;
5484 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5485 prefix, t->debug_id, t,
5486 t->from ? t->from->proc->pid : 0,
5487 t->from ? t->from->pid : 0,
5488 to_proc ? to_proc->pid : 0,
5489 t->to_thread ? t->to_thread->pid : 0,
5490 t->code, t->flags, t->priority, t->need_reply);
5491 spin_unlock(&t->lock);
5493 if (proc != to_proc) {
5495 * Can only safely deref buffer if we are holding the
5496 * correct proc inner lock for this node
5502 if (buffer == NULL) {
5503 seq_puts(m, " buffer free\n");
5506 if (buffer->target_node)
5507 seq_printf(m, " node %d", buffer->target_node->debug_id);
5508 seq_printf(m, " size %zd:%zd data %pK\n",
5509 buffer->data_size, buffer->offsets_size,
5513 static void print_binder_work_ilocked(struct seq_file *m,
5514 struct binder_proc *proc,
5516 const char *transaction_prefix,
5517 struct binder_work *w)
5519 struct binder_node *node;
5520 struct binder_transaction *t;
5523 case BINDER_WORK_TRANSACTION:
5524 t = container_of(w, struct binder_transaction, work);
5525 print_binder_transaction_ilocked(
5526 m, proc, transaction_prefix, t);
5528 case BINDER_WORK_RETURN_ERROR: {
5529 struct binder_error *e = container_of(
5530 w, struct binder_error, work);
5532 seq_printf(m, "%stransaction error: %u\n",
5535 case BINDER_WORK_TRANSACTION_COMPLETE:
5536 seq_printf(m, "%stransaction complete\n", prefix);
5538 case BINDER_WORK_NODE:
5539 node = container_of(w, struct binder_node, work);
5540 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5541 prefix, node->debug_id,
5542 (u64)node->ptr, (u64)node->cookie);
5544 case BINDER_WORK_DEAD_BINDER:
5545 seq_printf(m, "%shas dead binder\n", prefix);
5547 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5548 seq_printf(m, "%shas cleared dead binder\n", prefix);
5550 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5551 seq_printf(m, "%shas cleared death notification\n", prefix);
5554 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5559 static void print_binder_thread_ilocked(struct seq_file *m,
5560 struct binder_thread *thread,
5563 struct binder_transaction *t;
5564 struct binder_work *w;
5565 size_t start_pos = m->count;
5568 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5569 thread->pid, thread->looper,
5570 thread->looper_need_return,
5571 atomic_read(&thread->tmp_ref));
5572 header_pos = m->count;
5573 t = thread->transaction_stack;
5575 if (t->from == thread) {
5576 print_binder_transaction_ilocked(m, thread->proc,
5577 " outgoing transaction", t);
5579 } else if (t->to_thread == thread) {
5580 print_binder_transaction_ilocked(m, thread->proc,
5581 " incoming transaction", t);
5584 print_binder_transaction_ilocked(m, thread->proc,
5585 " bad transaction", t);
5589 list_for_each_entry(w, &thread->todo, entry) {
5590 print_binder_work_ilocked(m, thread->proc, " ",
5591 " pending transaction", w);
5593 if (!print_always && m->count == header_pos)
5594 m->count = start_pos;
5597 static void print_binder_node_nilocked(struct seq_file *m,
5598 struct binder_node *node)
5600 struct binder_ref *ref;
5601 struct binder_work *w;
5605 hlist_for_each_entry(ref, &node->refs, node_entry)
5608 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5609 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5610 node->has_strong_ref, node->has_weak_ref,
5611 node->local_strong_refs, node->local_weak_refs,
5612 node->internal_strong_refs, count, node->tmp_refs);
5614 seq_puts(m, " proc");
5615 hlist_for_each_entry(ref, &node->refs, node_entry)
5616 seq_printf(m, " %d", ref->proc->pid);
5620 list_for_each_entry(w, &node->async_todo, entry)
5621 print_binder_work_ilocked(m, node->proc, " ",
5622 " pending async transaction", w);
5626 static void print_binder_ref_olocked(struct seq_file *m,
5627 struct binder_ref *ref)
5629 binder_node_lock(ref->node);
5630 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5631 ref->data.debug_id, ref->data.desc,
5632 ref->node->proc ? "" : "dead ",
5633 ref->node->debug_id, ref->data.strong,
5634 ref->data.weak, ref->death);
5635 binder_node_unlock(ref->node);
5638 static void print_binder_proc(struct seq_file *m,
5639 struct binder_proc *proc, int print_all)
5641 struct binder_work *w;
5643 size_t start_pos = m->count;
5645 struct binder_node *last_node = NULL;
5647 seq_printf(m, "proc %d\n", proc->pid);
5648 seq_printf(m, "context %s\n", proc->context->name);
5649 header_pos = m->count;
5651 binder_inner_proc_lock(proc);
5652 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5653 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5654 rb_node), print_all);
5656 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5657 struct binder_node *node = rb_entry(n, struct binder_node,
5659 if (!print_all && !node->has_async_transaction)
5663 * take a temporary reference on the node so it
5664 * survives and isn't removed from the tree
5665 * while we print it.
5667 binder_inc_node_tmpref_ilocked(node);
5668 /* Need to drop inner lock to take node lock */
5669 binder_inner_proc_unlock(proc);
5671 binder_put_node(last_node);
5672 binder_node_inner_lock(node);
5673 print_binder_node_nilocked(m, node);
5674 binder_node_inner_unlock(node);
5676 binder_inner_proc_lock(proc);
5678 binder_inner_proc_unlock(proc);
5680 binder_put_node(last_node);
5683 binder_proc_lock(proc);
5684 for (n = rb_first(&proc->refs_by_desc);
5687 print_binder_ref_olocked(m, rb_entry(n,
5690 binder_proc_unlock(proc);
5692 binder_alloc_print_allocated(m, &proc->alloc);
5693 binder_inner_proc_lock(proc);
5694 list_for_each_entry(w, &proc->todo, entry)
5695 print_binder_work_ilocked(m, proc, " ",
5696 " pending transaction", w);
5697 list_for_each_entry(w, &proc->delivered_death, entry) {
5698 seq_puts(m, " has delivered dead binder\n");
5701 binder_inner_proc_unlock(proc);
5702 if (!print_all && m->count == header_pos)
5703 m->count = start_pos;
5706 static const char * const binder_return_strings[] = {
5711 "BR_ACQUIRE_RESULT",
5713 "BR_TRANSACTION_COMPLETE",
5718 "BR_ATTEMPT_ACQUIRE",
5723 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5727 static const char * const binder_command_strings[] = {
5730 "BC_ACQUIRE_RESULT",
5738 "BC_ATTEMPT_ACQUIRE",
5739 "BC_REGISTER_LOOPER",
5742 "BC_REQUEST_DEATH_NOTIFICATION",
5743 "BC_CLEAR_DEATH_NOTIFICATION",
5744 "BC_DEAD_BINDER_DONE",
5745 "BC_TRANSACTION_SG",
5749 static const char * const binder_objstat_strings[] = {
5756 "transaction_complete"
5759 static void print_binder_stats(struct seq_file *m, const char *prefix,
5760 struct binder_stats *stats)
5764 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5765 ARRAY_SIZE(binder_command_strings));
5766 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5767 int temp = atomic_read(&stats->bc[i]);
5770 seq_printf(m, "%s%s: %d\n", prefix,
5771 binder_command_strings[i], temp);
5774 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5775 ARRAY_SIZE(binder_return_strings));
5776 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5777 int temp = atomic_read(&stats->br[i]);
5780 seq_printf(m, "%s%s: %d\n", prefix,
5781 binder_return_strings[i], temp);
5784 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5785 ARRAY_SIZE(binder_objstat_strings));
5786 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5787 ARRAY_SIZE(stats->obj_deleted));
5788 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5789 int created = atomic_read(&stats->obj_created[i]);
5790 int deleted = atomic_read(&stats->obj_deleted[i]);
5792 if (created || deleted)
5793 seq_printf(m, "%s%s: active %d total %d\n",
5795 binder_objstat_strings[i],
5801 static void print_binder_proc_stats(struct seq_file *m,
5802 struct binder_proc *proc)
5804 struct binder_work *w;
5805 struct binder_thread *thread;
5807 int count, strong, weak, ready_threads;
5808 size_t free_async_space =
5809 binder_alloc_get_free_async_space(&proc->alloc);
5811 seq_printf(m, "proc %d\n", proc->pid);
5812 seq_printf(m, "context %s\n", proc->context->name);
5815 binder_inner_proc_lock(proc);
5816 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5819 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5822 seq_printf(m, " threads: %d\n", count);
5823 seq_printf(m, " requested threads: %d+%d/%d\n"
5824 " ready threads %d\n"
5825 " free async space %zd\n", proc->requested_threads,
5826 proc->requested_threads_started, proc->max_threads,
5830 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5832 binder_inner_proc_unlock(proc);
5833 seq_printf(m, " nodes: %d\n", count);
5837 binder_proc_lock(proc);
5838 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5839 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5842 strong += ref->data.strong;
5843 weak += ref->data.weak;
5845 binder_proc_unlock(proc);
5846 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5848 count = binder_alloc_get_allocated_count(&proc->alloc);
5849 seq_printf(m, " buffers: %d\n", count);
5851 binder_alloc_print_pages(m, &proc->alloc);
5854 binder_inner_proc_lock(proc);
5855 list_for_each_entry(w, &proc->todo, entry) {
5856 if (w->type == BINDER_WORK_TRANSACTION)
5859 binder_inner_proc_unlock(proc);
5860 seq_printf(m, " pending transactions: %d\n", count);
5862 print_binder_stats(m, " ", &proc->stats);
5866 static int state_show(struct seq_file *m, void *unused)
5868 struct binder_proc *proc;
5869 struct binder_node *node;
5870 struct binder_node *last_node = NULL;
5872 seq_puts(m, "binder state:\n");
5874 spin_lock(&binder_dead_nodes_lock);
5875 if (!hlist_empty(&binder_dead_nodes))
5876 seq_puts(m, "dead nodes:\n");
5877 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5879 * take a temporary reference on the node so it
5880 * survives and isn't removed from the list
5881 * while we print it.
5884 spin_unlock(&binder_dead_nodes_lock);
5886 binder_put_node(last_node);
5887 binder_node_lock(node);
5888 print_binder_node_nilocked(m, node);
5889 binder_node_unlock(node);
5891 spin_lock(&binder_dead_nodes_lock);
5893 spin_unlock(&binder_dead_nodes_lock);
5895 binder_put_node(last_node);
5897 mutex_lock(&binder_procs_lock);
5898 hlist_for_each_entry(proc, &binder_procs, proc_node)
5899 print_binder_proc(m, proc, 1);
5900 mutex_unlock(&binder_procs_lock);
5905 static int stats_show(struct seq_file *m, void *unused)
5907 struct binder_proc *proc;
5909 seq_puts(m, "binder stats:\n");
5911 print_binder_stats(m, "", &binder_stats);
5913 mutex_lock(&binder_procs_lock);
5914 hlist_for_each_entry(proc, &binder_procs, proc_node)
5915 print_binder_proc_stats(m, proc);
5916 mutex_unlock(&binder_procs_lock);
5921 static int transactions_show(struct seq_file *m, void *unused)
5923 struct binder_proc *proc;
5925 seq_puts(m, "binder transactions:\n");
5926 mutex_lock(&binder_procs_lock);
5927 hlist_for_each_entry(proc, &binder_procs, proc_node)
5928 print_binder_proc(m, proc, 0);
5929 mutex_unlock(&binder_procs_lock);
5934 static int proc_show(struct seq_file *m, void *unused)
5936 struct binder_proc *itr;
5937 int pid = (unsigned long)m->private;
5939 mutex_lock(&binder_procs_lock);
5940 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5941 if (itr->pid == pid) {
5942 seq_puts(m, "binder proc state:\n");
5943 print_binder_proc(m, itr, 1);
5946 mutex_unlock(&binder_procs_lock);
5951 static void print_binder_transaction_log_entry(struct seq_file *m,
5952 struct binder_transaction_log_entry *e)
5954 int debug_id = READ_ONCE(e->debug_id_done);
5956 * read barrier to guarantee debug_id_done read before
5957 * we print the log values
5961 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5962 e->debug_id, (e->call_type == 2) ? "reply" :
5963 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5964 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5965 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5966 e->return_error, e->return_error_param,
5967 e->return_error_line);
5969 * read-barrier to guarantee read of debug_id_done after
5970 * done printing the fields of the entry
5973 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5974 "\n" : " (incomplete)\n");
5977 static int transaction_log_show(struct seq_file *m, void *unused)
5979 struct binder_transaction_log *log = m->private;
5980 unsigned int log_cur = atomic_read(&log->cur);
5985 count = log_cur + 1;
5986 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5987 0 : count % ARRAY_SIZE(log->entry);
5988 if (count > ARRAY_SIZE(log->entry) || log->full)
5989 count = ARRAY_SIZE(log->entry);
5990 for (i = 0; i < count; i++) {
5991 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5993 print_binder_transaction_log_entry(m, &log->entry[index]);
5994 }
5995 return 0;
5996 }
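/*
* Editorial worked example (not driver code): with ARRAY_SIZE(log->entry)
* == 32 and a wrapped log (log->full) whose cur == 40, count becomes 41
* and is clamped to 32, and cur starts at 41 % 32 == 9, so entries are
* printed oldest-first as indices 9..31 then 0..8. An unwrapped log with
* cur == 4 prints indices 0..4.
*/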
5998 const struct file_operations binder_fops = {
5999 .owner = THIS_MODULE,
6000 .poll = binder_poll,
6001 .unlocked_ioctl = binder_ioctl,
6002 .compat_ioctl = binder_ioctl,
6003 .mmap = binder_mmap,
6004 .open = binder_open,
6005 .flush = binder_flush,
6006 .release = binder_release,
6009 DEFINE_SHOW_ATTRIBUTE(state);
6010 DEFINE_SHOW_ATTRIBUTE(stats);
6011 DEFINE_SHOW_ATTRIBUTE(transactions);
6012 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6014 static int __init init_binder_device(const char *name)
6017 struct binder_device *binder_device;
6019 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6023 binder_device->miscdev.fops = &binder_fops;
6024 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6025 binder_device->miscdev.name = name;
6027 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6028 binder_device->context.name = name;
6029 mutex_init(&binder_device->context.context_mgr_node_lock);
6031 ret = misc_register(&binder_device->miscdev);
6033 kfree(binder_device);
6037 hlist_add_head(&binder_device->hlist, &binder_devices);
6042 static int __init binder_init(void)
6045 char *device_name, *device_tmp;
6046 struct binder_device *device;
6047 struct hlist_node *tmp;
6048 char *device_names = NULL;
6050 ret = binder_alloc_shrinker_init();
6054 atomic_set(&binder_transaction_log.cur, ~0U);
6055 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6057 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6058 if (binder_debugfs_dir_entry_root)
6059 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6060 binder_debugfs_dir_entry_root);
6062 if (binder_debugfs_dir_entry_root) {
6063 debugfs_create_file("state",
6065 binder_debugfs_dir_entry_root,
6068 debugfs_create_file("stats",
6070 binder_debugfs_dir_entry_root,
6073 debugfs_create_file("transactions",
6075 binder_debugfs_dir_entry_root,
6077 &transactions_fops);
6078 debugfs_create_file("transaction_log",
6080 binder_debugfs_dir_entry_root,
6081 &binder_transaction_log,
6082 &transaction_log_fops);
6083 debugfs_create_file("failed_transaction_log",
6085 binder_debugfs_dir_entry_root,
6086 &binder_transaction_log_failed,
6087 &transaction_log_fops);
6090 if (strcmp(binder_devices_param, "") != 0) {
6092 * Copy the module_parameter string, because we don't want to
6093 * tokenize it in-place.
6095 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6096 if (!device_names) {
6098 goto err_alloc_device_names_failed;
6101 device_tmp = device_names;
6102 while ((device_name = strsep(&device_tmp, ","))) {
6103 ret = init_binder_device(device_name);
6105 goto err_init_binder_device_failed;
6106 }
6107 }
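/*
* Editorial note (not driver code): binder_devices_param is seeded from
* CONFIG_ANDROID_BINDER_DEVICES and overridable via the "devices" module
* parameter as a comma-separated list, e.g. booting with
*
*	binder.devices=binder,hwbinder,vndbinder
*
* makes the strsep() loop above call init_binder_device() three times,
* creating /dev/binder, /dev/hwbinder and /dev/vndbinder.
*/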
6109 ret = init_binderfs();
6111 goto err_init_binder_device_failed;
6115 err_init_binder_device_failed:
6116 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6117 misc_deregister(&device->miscdev);
6118 hlist_del(&device->hlist);
6122 kfree(device_names);
6124 err_alloc_device_names_failed:
6125 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6130 device_initcall(binder_init);
6132 #define CREATE_TRACE_POINTS
6133 #include "binder_trace.h"
6135 MODULE_LICENSE("GPL v2");