// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
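/*
 * Editorial sketch (not part of the original source): for a node whose
 * proc is live, the documented nesting order, when all three locks are
 * needed, looks like this:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below combine
 * steps 2) and 3) for the common node+inner case.
 */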
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
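/*
 * Illustrative usage of the logging macros above (a sketch, not taken
 * from the original source):
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", proc->pid);
 *	binder_user_error("%d:%d bad handle\n", proc->pid, thread->pid);
 *
 * Both print ratelimited when the corresponding bit is set in
 * debug_mask; binder_user_error() additionally latches
 * binder_stop_on_user_error to 2 when stop_on_user_error is enabled.
 */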
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);
	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}
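
/*
 * Illustrative sketch (not part of the driver): a BINDER_TYPE_FDA object
 * never carries the fds itself; it points into an earlier BINDER_TYPE_PTR
 * (binder_buffer_object) parent, which holds a packed u32 fd array. Example
 * layout, with invented sizes:
 *
 *   parent buffer (parent->length = 32):
 *     [  0 .. 15 ]  other payload
 *     [ 16 .. 31 ]  u32 fds[4]          <- fda->parent_offset = 16
 *   fd array object:
 *     .num_fds = 4, so fd_buf_size = 4 * sizeof(u32) = 16
 *
 * The bounds checks above amount to: fd_buf_size <= parent->length and
 * fda->parent_offset <= parent->length - fd_buf_size.
 */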

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
					&bp->buffer, sizeof(bp->buffer))) {
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	return 0;
}
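
/*
 * Illustrative sketch (not part of the driver): when a binder_buffer_object
 * has BINDER_BUFFER_FLAG_HAS_PARENT set, a pointer slot inside its parent
 * buffer must be patched to the child's address in the target's mapping.
 * With invented offsets:
 *
 *   parent (BINDER_TYPE_PTR), already copied into the target buffer
 *     +-- at bp->parent_offset: a binder_uintptr_t slot to patch
 *   the child's bytes were copied to t->buffer->user_data + sg_buf_offset,
 *   so binder_fixup_parent() writes that target-side address into the slot:
 *     buffer_offset = bp->parent_offset
 *                     + (parent->buffer - b->user_data);
 *     copy sizeof(bp->buffer) bytes of the new address to buffer_offset
 */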

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen
 */
static int binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((proc->is_frozen && !oneway) || proc->is_dead ||
			(thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return 0;
}
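
/*
 * Illustrative sketch (not part of the driver): the queue chosen by
 * binder_proc_transaction() can be summarized as:
 *
 *   @thread != NULL (reply path)        -> that thread's todo
 *   !oneway, idle thread available      -> selected thread's todo
 *   !oneway, no idle thread             -> proc->todo
 *   oneway, node currently idle         -> selected thread or proc->todo
 *   oneway, node has a pending async    -> node->async_todo (delivered only
 *                                          when the previous async buffer
 *                                          is freed, see binder_free_buf())
 */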

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid,
						  tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		/*
		 * Arguably this should be the task's subjective LSM secid but
		 * we can't reliably access the subjective creds of a task
		 * other than our own so we must use the objective creds, which
		 * are safe to access. The downside is that if a task is
		 * temporarily overriding its creds it will not be reflected
		 * here; however, it isn't clear that binder would handle that
		 * case well anyway.
		 */
		security_task_getsecid_obj(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY), current->tgid);
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer,
						sg_buf_offset,
						(const void __user *)
							(uintptr_t)bp->buffer,
						bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		if (return_error)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, thread, buffer, 0, false);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
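
/*
 * Illustrative sketch (not part of the driver): user-space triggers this
 * path with BC_FREE_BUFFER once it has consumed a received transaction.
 * A minimal client step might look like the following (error handling
 * omitted; the "buf" name is this example's own):
 *
 *   uint32_t cmd = BC_FREE_BUFFER;
 *   binder_uintptr_t buf = trd->data.ptr.buffer; // from BR_TRANSACTION
 *   // write {cmd, buf} back through the BINDER_WRITE_READ ioctl
 *
 * Freeing the buffer is also what unthrottles node->async_todo: the next
 * queued async transaction is moved to proc->todo here.
 */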

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (!node) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
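
/*
 * Illustrative sketch (not part of the driver): the byte stream parsed by
 * binder_thread_write() is produced by user-space through the
 * BINDER_WRITE_READ ioctl. A minimal looper registration, with this
 * example's own variable names:
 *
 *   uint32_t cmd = BC_ENTER_LOOPER;
 *   struct binder_write_read bwr = {
 *           .write_buffer = (binder_uintptr_t)&cmd,
 *           .write_size   = sizeof(cmd),
 *   };
 *   ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * *consumed (bwr.write_consumed) tells user-space how far the kernel got
 * if the loop stops early on an error or a pending return_error.
 */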

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}
3815 static int binder_thread_read(struct binder_proc
*proc
,
3816 struct binder_thread
*thread
,
3817 binder_uintptr_t binder_buffer
, size_t size
,
3818 binder_size_t
*consumed
, int non_block
)
3820 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3821 void __user
*ptr
= buffer
+ *consumed
;
3822 void __user
*end
= buffer
+ size
;
3825 int wait_for_proc_work
;
3827 if (*consumed
== 0) {
3828 if (put_user(BR_NOOP
, (uint32_t __user
*)ptr
))
3830 ptr
+= sizeof(uint32_t);
3834 binder_inner_proc_lock(proc
);
3835 wait_for_proc_work
= binder_available_for_proc_work_ilocked(thread
);
3836 binder_inner_proc_unlock(proc
);
3838 thread
->looper
|= BINDER_LOOPER_STATE_WAITING
;
3840 trace_binder_wait_for_work(wait_for_proc_work
,
3841 !!thread
->transaction_stack
,
3842 !binder_worklist_empty(proc
, &thread
->todo
));
3843 if (wait_for_proc_work
) {
3844 if (!(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3845 BINDER_LOOPER_STATE_ENTERED
))) {
3846 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3847 proc
->pid
, thread
->pid
, thread
->looper
);
3848 wait_event_interruptible(binder_user_error_wait
,
3849 binder_stop_on_user_error
< 2);
3851 binder_set_nice(proc
->default_priority
);
3855 if (!binder_has_work(thread
, wait_for_proc_work
))
3858 ret
= binder_wait_for_work(thread
, wait_for_proc_work
);
3861 thread
->looper
&= ~BINDER_LOOPER_STATE_WAITING
;
3868 struct binder_transaction_data_secctx tr
;
3869 struct binder_transaction_data
*trd
= &tr
.transaction_data
;
3870 struct binder_work
*w
= NULL
;
3871 struct list_head
*list
= NULL
;
3872 struct binder_transaction
*t
= NULL
;
3873 struct binder_thread
*t_from
;
3874 size_t trsize
= sizeof(*trd
);
3876 binder_inner_proc_lock(proc
);
3877 if (!binder_worklist_empty_ilocked(&thread
->todo
))
3878 list
= &thread
->todo
;
3879 else if (!binder_worklist_empty_ilocked(&proc
->todo
) &&
3883 binder_inner_proc_unlock(proc
);
3886 if (ptr
- buffer
== 4 && !thread
->looper_need_return
)
3891 if (end
- ptr
< sizeof(tr
) + 4) {
3892 binder_inner_proc_unlock(proc
);
3895 w
= binder_dequeue_work_head_ilocked(list
);
3896 if (binder_worklist_empty_ilocked(&thread
->todo
))
3897 thread
->process_todo
= false;
3900 case BINDER_WORK_TRANSACTION
: {
3901 binder_inner_proc_unlock(proc
);
3902 t
= container_of(w
, struct binder_transaction
, work
);
3904 case BINDER_WORK_RETURN_ERROR
: {
3905 struct binder_error
*e
= container_of(
3906 w
, struct binder_error
, work
);
3908 WARN_ON(e
->cmd
== BR_OK
);
3909 binder_inner_proc_unlock(proc
);
3910 if (put_user(e
->cmd
, (uint32_t __user
*)ptr
))
3914 ptr
+= sizeof(uint32_t);
3916 binder_stat_br(proc
, thread
, cmd
);
3918 case BINDER_WORK_TRANSACTION_COMPLETE
:
3919 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT
: {
3920 if (proc
->oneway_spam_detection_enabled
&&
3921 w
->type
== BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT
)
3922 cmd
= BR_ONEWAY_SPAM_SUSPECT
;
3924 cmd
= BR_TRANSACTION_COMPLETE
;
3925 binder_inner_proc_unlock(proc
);
3927 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3928 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3930 ptr
+= sizeof(uint32_t);
3932 binder_stat_br(proc
, thread
, cmd
);
3933 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE
,
3934 "%d:%d BR_TRANSACTION_COMPLETE\n",
3935 proc
->pid
, thread
->pid
);
3937 case BINDER_WORK_NODE
: {
3938 struct binder_node
*node
= container_of(w
, struct binder_node
, work
);
3940 binder_uintptr_t node_ptr
= node
->ptr
;
3941 binder_uintptr_t node_cookie
= node
->cookie
;
3942 int node_debug_id
= node
->debug_id
;
3945 void __user
*orig_ptr
= ptr
;
3947 BUG_ON(proc
!= node
->proc
);
3948 strong
= node
->internal_strong_refs
||
3949 node
->local_strong_refs
;
3950 weak
= !hlist_empty(&node
->refs
) ||
3951 node
->local_weak_refs
||
3952 node
->tmp_refs
|| strong
;
3953 has_strong_ref
= node
->has_strong_ref
;
3954 has_weak_ref
= node
->has_weak_ref
;
3956 if (weak
&& !has_weak_ref
) {
3957 node
->has_weak_ref
= 1;
3958 node
->pending_weak_ref
= 1;
3959 node
->local_weak_refs
++;
3961 if (strong
&& !has_strong_ref
) {
3962 node
->has_strong_ref
= 1;
3963 node
->pending_strong_ref
= 1;
3964 node
->local_strong_refs
++;
3966 if (!strong
&& has_strong_ref
)
3967 node
->has_strong_ref
= 0;
3968 if (!weak
&& has_weak_ref
)
3969 node
->has_weak_ref
= 0;
3970 if (!weak
&& !strong
) {
3971 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
3972 "%d:%d node %d u%016llx c%016llx deleted\n",
3973 proc
->pid
, thread
->pid
,
3977 rb_erase(&node
->rb_node
, &proc
->nodes
);
3978 binder_inner_proc_unlock(proc
);
3979 binder_node_lock(node
);
3981 * Acquire the node lock before freeing the
3982 * node to serialize with other threads that
3983 * may have been holding the node lock while
3984 * decrementing this node (avoids race where
3985 * this thread frees while the other thread
3986 * is unlocking the node after the final
3989 binder_node_unlock(node
);
3990 binder_free_node(node
);
3992 binder_inner_proc_unlock(proc
);
3994 if (weak
&& !has_weak_ref
)
3995 ret
= binder_put_node_cmd(
3996 proc
, thread
, &ptr
, node_ptr
,
3997 node_cookie
, node_debug_id
,
3998 BR_INCREFS
, "BR_INCREFS");
3999 if (!ret
&& strong
&& !has_strong_ref
)
4000 ret
= binder_put_node_cmd(
4001 proc
, thread
, &ptr
, node_ptr
,
4002 node_cookie
, node_debug_id
,
4003 BR_ACQUIRE
, "BR_ACQUIRE");
4004 if (!ret
&& !strong
&& has_strong_ref
)
4005 ret
= binder_put_node_cmd(
4006 proc
, thread
, &ptr
, node_ptr
,
4007 node_cookie
, node_debug_id
,
4008 BR_RELEASE
, "BR_RELEASE");
4009 if (!ret
&& !weak
&& has_weak_ref
)
4010 ret
= binder_put_node_cmd(
4011 proc
, thread
, &ptr
, node_ptr
,
4012 node_cookie
, node_debug_id
,
4013 BR_DECREFS
, "BR_DECREFS");
4014 if (orig_ptr
== ptr
)
4015 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
4016 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4017 proc
->pid
, thread
->pid
,
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}
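
		/*
		 * Illustrative sketch (not part of the driver): acknowledging
		 * BR_DEAD_BINDER from user space so the work item queued above
		 * can leave proc->delivered_death. Assumes a binder fd opened
		 * elsewhere; the helper name is made up for the example.
		 */
#if 0
static void ack_dead_binder(int fd, binder_uintptr_t cookie)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t cookie;
	} __attribute__((packed)) done = { BC_DEAD_BINDER_DONE, cookie };
	struct binder_write_read bwr = {
		.write_size = sizeof(done),
		.write_buffer = (uintptr_t)&done,
	};

	ioctl(fd, BINDER_WRITE_READ, &bwr);
}
#endif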
		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					    * spawn a new thread if we leave
					    * this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
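
/*
 * Illustrative sketch (not part of the driver): the user-space side of the
 * BR_SPAWN_LOOPER handshake above. The new thread must announce itself with
 * BC_REGISTER_LOOPER, which is what increments requested_threads_started.
 * looper_main/ctx are assumptions for the example.
 */
#if 0
case BR_SPAWN_LOOPER: {
	pthread_t tid;

	/* the spawned thread writes BC_REGISTER_LOOPER before reading */
	pthread_create(&tid, NULL, looper_main, ctx);
	break;
}
#endif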
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static __poll_t binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
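
/*
 * Illustrative user-space counterpart of binder_poll() (assumed epfd and
 * binder_fd): wait for EPOLLIN, then drain with a non-blocking read so the
 * thread does not sleep in binder_thread_read() if the work vanished.
 */
#if 0
struct epoll_event ev = { .events = EPOLLIN };

epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
epoll_wait(epfd, &ev, 1, -1);
/* now issue BINDER_WRITE_READ with read_size > 0; O_NONBLOCK on the fd
 * makes the read half bail out instead of blocking. */
#endif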
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
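
/*
 * Illustrative: how a service manager claims the context. The plain ioctl
 * passes no object; BINDER_SET_CONTEXT_MGR_EXT supplies a flat_binder_object
 * so flags such as FLAT_BINDER_FLAG_TXN_SECURITY_CTX can be set.
 */
#if 0
if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
	err(1, "context already owned or denied by security policy");
#endif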
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}
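
/*
 * Illustrative user-space freeze request (assumed target_pid and fd). A
 * timeout that expires with transactions still pending surfaces as -EAGAIN
 * and the freeze is rolled back above, so callers typically retry.
 */
#if 0
struct binder_freeze_info info = {
	.pid = target_pid,
	.enable = 1,
	.timeout_ms = 100,
};

while (ioctl(fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
	usleep(1000);
#endif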
static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}
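
/*
 * Illustrative: decoding the status assembled above. Bit 0 of sync_recv
 * means a sync transaction arrived while frozen; bit 1 (txns_pending << 1)
 * means replies are still outstanding.
 */
#if 0
struct binder_frozen_status_info st = { .pid = target_pid };

if (ioctl(fd, BINDER_GET_FROZEN_INFO, &st) == 0) {
	bool got_sync     = st.sync_recv & 1;
	bool txns_pending = st.sync_recv & 2;
	bool got_async    = st.async_recv & 1;
}
#endif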
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
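
/*
 * Illustrative: the first thing most user-space bindings do after open(2)
 * is validate the protocol version through the BINDER_VERSION case above.
 */
#if 0
struct binder_version vers;

if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
	/* refuse to talk to a mismatched driver */;
#endif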
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}
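
/*
 * Illustrative user-space setup for the handler above. The mapping must be
 * read-only (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE) and the driver copies
 * transaction payloads into it. The size below is the one libbinder has
 * traditionally used; it is an assumption for the example, not a driver
 * requirement.
 */
#if 0
#define BINDER_VM_SIZE (1024 * 1024 - sysconf(_SC_PAGE_SIZE) * 2)

int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
void *map = mmap(NULL, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
#endif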
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming.
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);
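
/*
 * Illustrative: without binderfs, the devices registered by binder_init()
 * come from the comma-separated module parameter, e.g. on the kernel
 * command line:
 *
 *   binder.devices=binder,hwbinder,vndbinder
 *
 * Each name becomes one misc character device via init_binder_device().
 */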
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");