drivers/android/binder.c (mirror_ubuntu-bionic-kernel.git, blob 5654187555be8b95971d5c257b95c1abd3d05342)
1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->nodes) and all todo lists associated
32 * with the binder_proc (proc->todo, thread->todo,
33 * proc->delivered_death and node->async_todo).
34 * binder_inner_proc_lock() and binder_inner_proc_unlock()
35 * are used to acq/rel
36 *
37 * Any lock under procA must never be nested under any lock at the same
38 * level or below on procB.
39 *
40 * Functions that require a lock held on entry indicate which lock
41 * is required in the suffix of the function name:
42 *
43 * foo_olocked() : requires proc->outer_lock
44 * foo_nlocked() : requires node->lock
45 * foo_ilocked() : requires proc->inner_lock
46 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
47 * foo_nilocked(): requires node->lock and proc->inner_lock
48 * ...
49 */
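
/*
 * Illustrative sketch (not part of the driver): a path that needed all
 * three locks for a given proc/node pair would take them in the order
 * documented above, using the helpers defined later in this file, and
 * release them in the reverse order:
 *
 *	binder_proc_lock(proc);		1) proc->outer_lock
 *	binder_node_lock(node);		2) node->lock
 *	binder_inner_proc_lock(proc);	3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */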
50
51 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
53 #include <asm/cacheflush.h>
54 #include <linux/fdtable.h>
55 #include <linux/file.h>
56 #include <linux/freezer.h>
57 #include <linux/fs.h>
58 #include <linux/list.h>
59 #include <linux/miscdevice.h>
60 #include <linux/module.h>
61 #include <linux/mutex.h>
62 #include <linux/nsproxy.h>
63 #include <linux/poll.h>
64 #include <linux/debugfs.h>
65 #include <linux/rbtree.h>
66 #include <linux/sched/signal.h>
67 #include <linux/sched/mm.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73
74 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
75 #define BINDER_IPC_32BIT 1
76 #endif
77
78 #include <uapi/linux/android/binder.h>
79 #include "binder_alloc.h"
80 #include "binder_trace.h"
81
82 static DEFINE_MUTEX(binder_main_lock);
83
84 static HLIST_HEAD(binder_deferred_list);
85 static DEFINE_MUTEX(binder_deferred_lock);
86
87 static HLIST_HEAD(binder_devices);
88 static HLIST_HEAD(binder_procs);
89 static DEFINE_MUTEX(binder_procs_lock);
90
91 static HLIST_HEAD(binder_dead_nodes);
92 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
93
94 static struct dentry *binder_debugfs_dir_entry_root;
95 static struct dentry *binder_debugfs_dir_entry_proc;
96 static atomic_t binder_last_id;
97
98 #define BINDER_DEBUG_ENTRY(name) \
99 static int binder_##name##_open(struct inode *inode, struct file *file) \
100 { \
101 return single_open(file, binder_##name##_show, inode->i_private); \
102 } \
103 \
104 static const struct file_operations binder_##name##_fops = { \
105 .owner = THIS_MODULE, \
106 .open = binder_##name##_open, \
107 .read = seq_read, \
108 .llseek = seq_lseek, \
109 .release = single_release, \
110 }
111
112 static int binder_proc_show(struct seq_file *m, void *unused);
113 BINDER_DEBUG_ENTRY(proc);
114
115 /* This is only defined in include/asm-arm/sizes.h */
116 #ifndef SZ_1K
117 #define SZ_1K 0x400
118 #endif
119
120 #ifndef SZ_4M
121 #define SZ_4M 0x400000
122 #endif
123
124 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
125
126 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
127
128 enum {
129 BINDER_DEBUG_USER_ERROR = 1U << 0,
130 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
131 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
132 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
133 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
134 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
135 BINDER_DEBUG_READ_WRITE = 1U << 6,
136 BINDER_DEBUG_USER_REFS = 1U << 7,
137 BINDER_DEBUG_THREADS = 1U << 8,
138 BINDER_DEBUG_TRANSACTION = 1U << 9,
139 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
140 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
141 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
142 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
143 BINDER_DEBUG_SPINLOCKS = 1U << 14,
144 };
145 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
146 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
147 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
148
149 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
150 module_param_named(devices, binder_devices_param, charp, 0444);
151
152 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
153 static int binder_stop_on_user_error;
154
155 static int binder_set_stop_on_user_error(const char *val,
156 struct kernel_param *kp)
157 {
158 int ret;
159
160 ret = param_set_int(val, kp);
161 if (binder_stop_on_user_error < 2)
162 wake_up(&binder_user_error_wait);
163 return ret;
164 }
165 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
166 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
167
168 #define binder_debug(mask, x...) \
169 do { \
170 if (binder_debug_mask & mask) \
171 pr_info(x); \
172 } while (0)
173
174 #define binder_user_error(x...) \
175 do { \
176 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
177 pr_info(x); \
178 if (binder_stop_on_user_error) \
179 binder_stop_on_user_error = 2; \
180 } while (0)
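
/*
 * Usage sketch (hypothetical call sites): binder_debug() only emits
 * when the corresponding bit is set in the debug_mask module parameter,
 * while binder_user_error() additionally arms the stop_on_user_error
 * machinery above, e.g.
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", proc->pid);
 *	binder_user_error("%d: bad handle %d\n", proc->pid, handle);
 */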
181
182 #define to_flat_binder_object(hdr) \
183 container_of(hdr, struct flat_binder_object, hdr)
184
185 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
186
187 #define to_binder_buffer_object(hdr) \
188 container_of(hdr, struct binder_buffer_object, hdr)
189
190 #define to_binder_fd_array_object(hdr) \
191 container_of(hdr, struct binder_fd_array_object, hdr)
192
193 enum binder_stat_types {
194 BINDER_STAT_PROC,
195 BINDER_STAT_THREAD,
196 BINDER_STAT_NODE,
197 BINDER_STAT_REF,
198 BINDER_STAT_DEATH,
199 BINDER_STAT_TRANSACTION,
200 BINDER_STAT_TRANSACTION_COMPLETE,
201 BINDER_STAT_COUNT
202 };
203
204 struct binder_stats {
205 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
206 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
207 atomic_t obj_created[BINDER_STAT_COUNT];
208 atomic_t obj_deleted[BINDER_STAT_COUNT];
209 };
210
211 static struct binder_stats binder_stats;
212
213 static inline void binder_stats_deleted(enum binder_stat_types type)
214 {
215 atomic_inc(&binder_stats.obj_deleted[type]);
216 }
217
218 static inline void binder_stats_created(enum binder_stat_types type)
219 {
220 atomic_inc(&binder_stats.obj_created[type]);
221 }
222
223 struct binder_transaction_log_entry {
224 int debug_id;
225 int debug_id_done;
226 int call_type;
227 int from_proc;
228 int from_thread;
229 int target_handle;
230 int to_proc;
231 int to_thread;
232 int to_node;
233 int data_size;
234 int offsets_size;
235 int return_error_line;
236 uint32_t return_error;
237 uint32_t return_error_param;
238 const char *context_name;
239 };
240 struct binder_transaction_log {
241 atomic_t cur;
242 bool full;
243 struct binder_transaction_log_entry entry[32];
244 };
245 static struct binder_transaction_log binder_transaction_log;
246 static struct binder_transaction_log binder_transaction_log_failed;
247
248 static struct binder_transaction_log_entry *binder_transaction_log_add(
249 struct binder_transaction_log *log)
250 {
251 struct binder_transaction_log_entry *e;
252 unsigned int cur = atomic_inc_return(&log->cur);
253
254 if (cur >= ARRAY_SIZE(log->entry))
255 log->full = 1;
256 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
257 WRITE_ONCE(e->debug_id_done, 0);
258 /*
259 * write-barrier to synchronize access to e->debug_id_done.
260 * We make sure the initialized 0 value is seen before
261 * the other fields are zeroed by memset().
262 */
263 smp_wmb();
264 memset(e, 0, sizeof(*e));
265 return e;
266 }
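
/*
 * Note (sketch of the expected pairing; the reader side lies outside
 * this excerpt): consumers of the log are expected to read
 * e->debug_id_done, issue smp_rmb(), and only then trust the other
 * fields, mirroring the smp_wmb() above:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	... print/copy the remaining fields of *e ...
 */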
267
268 struct binder_context {
269 struct binder_node *binder_context_mgr_node;
270 struct mutex context_mgr_node_lock;
271
272 kuid_t binder_context_mgr_uid;
273 const char *name;
274 };
275
276 struct binder_device {
277 struct hlist_node hlist;
278 struct miscdevice miscdev;
279 struct binder_context context;
280 };
281
282 /**
283 * struct binder_work - work enqueued on a worklist
284 * @entry: node enqueued on list
285 * @type: type of work to be performed
286 *
287 * There are separate work lists for proc, thread, and node (async).
288 */
289 struct binder_work {
290 struct list_head entry;
291
292 enum {
293 BINDER_WORK_TRANSACTION = 1,
294 BINDER_WORK_TRANSACTION_COMPLETE,
295 BINDER_WORK_RETURN_ERROR,
296 BINDER_WORK_NODE,
297 BINDER_WORK_DEAD_BINDER,
298 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
299 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
300 } type;
301 };
302
303 struct binder_error {
304 struct binder_work work;
305 uint32_t cmd;
306 };
307
308 /**
309 * struct binder_node - binder node bookkeeping
310 * @debug_id: unique ID for debugging
311 * (invariant after initialized)
312 * @lock: lock for node fields
313 * @work: worklist element for node work
314 * (protected by @proc->inner_lock)
315 * @rb_node: element for proc->nodes tree
316 * @dead_node: element for binder_dead_nodes list
317 * (protected by binder_dead_nodes_lock)
318 * @proc: binder_proc that owns this node
319 * (invariant after initialized)
320 * @refs: list of references on this node
321 * (protected by @lock)
322 * @internal_strong_refs: used to take strong references when
323 * initiating a transaction
324 * (protected by @proc->inner_lock if @proc
325 * and by @lock)
326 * @local_weak_refs: weak user refs from local process
327 * (protected by @proc->inner_lock if @proc
328 * and by @lock)
329 * @local_strong_refs: strong user refs from local process
330 * (protected by @proc->inner_lock if @proc
331 * and by @lock)
332 * @tmp_refs: temporary kernel refs
333 * (protected by @proc->inner_lock while @proc
334 * is valid, and by binder_dead_nodes_lock
335 * if @proc is NULL. During inc/dec and node release
336 * it is also protected by @lock to provide safety
337 * as the node dies and @proc becomes NULL)
338 * @ptr: userspace pointer for node
339 * (invariant, no lock needed)
340 * @cookie: userspace cookie for node
341 * (invariant, no lock needed)
342 * @has_strong_ref: userspace notified of strong ref
343 * (protected by @proc->inner_lock if @proc
344 * and by @lock)
345 * @pending_strong_ref: userspace has acked notification of strong ref
346 * (protected by @proc->inner_lock if @proc
347 * and by @lock)
348 * @has_weak_ref: userspace notified of weak ref
349 * (protected by @proc->inner_lock if @proc
350 * and by @lock)
351 * @pending_weak_ref: userspace has acked notification of weak ref
352 * (protected by @proc->inner_lock if @proc
353 * and by @lock)
354 * @has_async_transaction: async transaction to node in progress
355 * (protected by @lock)
356 * @accept_fds: file descriptor operations supported for node
357 * (invariant after initialized)
358 * @min_priority: minimum scheduling priority
359 * (invariant after initialized)
360 * @async_todo: list of async work items
361 * (protected by @proc->inner_lock)
362 *
363 * Bookkeeping structure for binder nodes.
364 */
365 struct binder_node {
366 int debug_id;
367 spinlock_t lock;
368 struct binder_work work;
369 union {
370 struct rb_node rb_node;
371 struct hlist_node dead_node;
372 };
373 struct binder_proc *proc;
374 struct hlist_head refs;
375 int internal_strong_refs;
376 int local_weak_refs;
377 int local_strong_refs;
378 int tmp_refs;
379 binder_uintptr_t ptr;
380 binder_uintptr_t cookie;
381 struct {
382 /*
383 * bitfield elements protected by
384 * proc inner_lock
385 */
386 u8 has_strong_ref:1;
387 u8 pending_strong_ref:1;
388 u8 has_weak_ref:1;
389 u8 pending_weak_ref:1;
390 };
391 struct {
392 /*
393 * invariant after initialization
394 */
395 u8 accept_fds:1;
396 u8 min_priority;
397 };
398 bool has_async_transaction;
399 struct list_head async_todo;
400 };
401
402 struct binder_ref_death {
403 /**
404 * @work: worklist element for death notifications
405 * (protected by inner_lock of the proc that
406 * this ref belongs to)
407 */
408 struct binder_work work;
409 binder_uintptr_t cookie;
410 };
411
412 /**
413 * struct binder_ref_data - binder_ref counts and id
414 * @debug_id: unique ID for the ref
415 * @desc: unique userspace handle for ref
416 * @strong: strong ref count (debugging only if not locked)
417 * @weak: weak ref count (debugging only if not locked)
418 *
419 * Structure to hold ref count and ref id information. Since
420 * the actual ref can only be accessed with a lock, this structure
421 * is used to return information about the ref to callers of
422 * ref inc/dec functions.
423 */
424 struct binder_ref_data {
425 int debug_id;
426 uint32_t desc;
427 int strong;
428 int weak;
429 };
430
431 /**
432 * struct binder_ref - struct to track references on nodes
433 * @data: binder_ref_data containing id, handle, and current refcounts
434 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
435 * @rb_node_node: node for lookup by @node in proc's rb_tree
436 * @node_entry: list entry for node->refs list in target node
437 * (protected by @node->lock)
438 * @proc: binder_proc containing ref
439 * @node: binder_node of target node. When cleaning up a
440 * ref for deletion in binder_cleanup_ref, a non-NULL
441 * @node indicates the node must be freed
442 * @death: pointer to death notification (ref_death) if requested
443 *
444 * Structure to track references from procA to target node (on procB). This
445 * structure is unsafe to access without holding @proc->outer_lock.
446 */
447 struct binder_ref {
448 /* Lookups needed: */
449 /* node + proc => ref (transaction) */
450 /* desc + proc => ref (transaction, inc/dec ref) */
451 /* node => refs + procs (proc exit) */
452 struct binder_ref_data data;
453 struct rb_node rb_node_desc;
454 struct rb_node rb_node_node;
455 struct hlist_node node_entry;
456 struct binder_proc *proc;
457 struct binder_node *node;
458 struct binder_ref_death *death;
459 };
460
461 enum binder_deferred_state {
462 BINDER_DEFERRED_PUT_FILES = 0x01,
463 BINDER_DEFERRED_FLUSH = 0x02,
464 BINDER_DEFERRED_RELEASE = 0x04,
465 };
466
467 /**
468 * struct binder_proc - binder process bookkeeping
469 * @proc_node: element for binder_procs list
470 * @threads: rbtree of binder_threads in this proc
471 * @nodes: rbtree of binder nodes associated with
472 * this proc ordered by node->ptr
473 * @refs_by_desc: rbtree of refs ordered by ref->desc
474 * @refs_by_node: rbtree of refs ordered by ref->node
475 * @pid: PID of group_leader of process
476 * (invariant after initialized)
477 * @tsk: task_struct for group_leader of process
478 * (invariant after initialized)
479 * @files: files_struct for process
480 * (invariant after initialized)
481 * @deferred_work_node: element for binder_deferred_list
482 * (protected by binder_deferred_lock)
483 * @deferred_work: bitmap of deferred work to perform
484 * (protected by binder_deferred_lock)
485 * @is_dead: process is dead and awaiting free
486 * when outstanding transactions are cleaned up
487 * @todo: list of work for this process
488 * (protected by @inner_lock)
489 * @wait: wait queue head to wait for proc work
490 * (invariant after initialized)
491 * @stats: per-process binder statistics
492 * (atomics, no lock needed)
493 * @delivered_death: list of delivered death notifications
494 * (protected by @inner_lock)
495 * @max_threads: cap on number of binder threads
496 * @requested_threads: number of binder threads requested but not
497 * yet started. In current implementation, can
498 * only be 0 or 1.
499 * @requested_threads_started: number of binder threads started
500 * @ready_threads: number of threads waiting for proc work
501 * @tmp_ref: temporary reference to indicate proc is in use
502 * @default_priority: default scheduler priority
503 * (invariant after initialized)
504 * @debugfs_entry: debugfs node
505 * @alloc: binder allocator bookkeeping
506 * @context: binder_context for this proc
507 * (invariant after initialized)
508 * @inner_lock: can nest under outer_lock and/or node lock
509 * @outer_lock: no nesting under inner or node lock
510 * Lock order: 1) outer, 2) node, 3) inner
511 *
512 * Bookkeeping structure for binder processes
513 */
514 struct binder_proc {
515 struct hlist_node proc_node;
516 struct rb_root threads;
517 struct rb_root nodes;
518 struct rb_root refs_by_desc;
519 struct rb_root refs_by_node;
520 int pid;
521 struct task_struct *tsk;
522 struct files_struct *files;
523 struct hlist_node deferred_work_node;
524 int deferred_work;
525 bool is_dead;
526
527 struct list_head todo;
528 wait_queue_head_t wait;
529 struct binder_stats stats;
530 struct list_head delivered_death;
531 int max_threads;
532 int requested_threads;
533 int requested_threads_started;
534 int ready_threads;
535 int tmp_ref;
536 long default_priority;
537 struct dentry *debugfs_entry;
538 struct binder_alloc alloc;
539 struct binder_context *context;
540 spinlock_t inner_lock;
541 spinlock_t outer_lock;
542 };
543
544 enum {
545 BINDER_LOOPER_STATE_REGISTERED = 0x01,
546 BINDER_LOOPER_STATE_ENTERED = 0x02,
547 BINDER_LOOPER_STATE_EXITED = 0x04,
548 BINDER_LOOPER_STATE_INVALID = 0x08,
549 BINDER_LOOPER_STATE_WAITING = 0x10,
550 };
551
552 /**
553 * struct binder_thread - binder thread bookkeeping
554 * @proc: binder process for this thread
555 * (invariant after initialization)
556 * @rb_node: element for proc->threads rbtree
557 * @pid: PID for this thread
558 * (invariant after initialization)
559 * @looper: bitmap of looping state
560 * (only accessed by this thread)
561 * @looper_need_return: looping thread needs to exit driver
562 * (no lock needed)
563 * @transaction_stack: stack of in-progress transactions for this thread
564 * @todo: list of work to do for this thread
565 * (protected by @proc->inner_lock)
566 * @return_error: transaction errors reported by this thread
567 * (only accessed by this thread)
568 * @reply_error: transaction errors reported by target thread
569 * @wait: wait queue for thread work
570 * @stats: per-thread statistics
571 * (atomics, no lock needed)
572 * @tmp_ref: temporary reference to indicate thread is in use
573 * (atomic since @proc->inner_lock cannot
574 * always be acquired)
575 * @is_dead: thread is dead and awaiting free
576 * when outstanding transactions are cleaned up
577 *
578 * Bookkeeping structure for binder threads.
579 */
580 struct binder_thread {
581 struct binder_proc *proc;
582 struct rb_node rb_node;
583 int pid;
584 int looper; /* only modified by this thread */
585 bool looper_need_return; /* can be written by other thread */
586 struct binder_transaction *transaction_stack;
587 struct list_head todo;
588 struct binder_error return_error;
589 struct binder_error reply_error;
590 wait_queue_head_t wait;
591 struct binder_stats stats;
592 atomic_t tmp_ref;
593 bool is_dead;
594 };
595
596 struct binder_transaction {
597 int debug_id;
598 struct binder_work work;
599 struct binder_thread *from;
600 struct binder_transaction *from_parent;
601 struct binder_proc *to_proc;
602 struct binder_thread *to_thread;
603 struct binder_transaction *to_parent;
604 unsigned need_reply:1;
605 /* unsigned is_dead:1; */ /* not used at the moment */
606
607 struct binder_buffer *buffer;
608 unsigned int code;
609 unsigned int flags;
610 long priority;
611 long saved_priority;
612 kuid_t sender_euid;
613 /**
614 * @lock: protects @from, @to_proc, and @to_thread
615 *
616 * @from, @to_proc, and @to_thread can be set to NULL
617 * during thread teardown
618 */
619 spinlock_t lock;
620 };
621
622 /**
623 * binder_proc_lock() - Acquire outer lock for given binder_proc
624 * @proc: struct binder_proc to acquire
625 *
626 * Acquires proc->outer_lock. Used to protect binder_ref
627 * structures associated with the given proc.
628 */
629 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
630 static void
631 _binder_proc_lock(struct binder_proc *proc, int line)
632 {
633 binder_debug(BINDER_DEBUG_SPINLOCKS,
634 "%s: line=%d\n", __func__, line);
635 spin_lock(&proc->outer_lock);
636 }
637
638 /**
639 * binder_proc_unlock() - Release spinlock for given binder_proc
640 * @proc: struct binder_proc to release
641 *
642 * Release lock acquired via binder_proc_lock()
643 */
644 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
645 static void
646 _binder_proc_unlock(struct binder_proc *proc, int line)
647 {
648 binder_debug(BINDER_DEBUG_SPINLOCKS,
649 "%s: line=%d\n", __func__, line);
650 spin_unlock(&proc->outer_lock);
651 }
652
653 /**
654 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
655 * @proc: struct binder_proc to acquire
656 *
657 * Acquires proc->inner_lock. Used to protect todo lists
658 */
659 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
660 static void
661 _binder_inner_proc_lock(struct binder_proc *proc, int line)
662 {
663 binder_debug(BINDER_DEBUG_SPINLOCKS,
664 "%s: line=%d\n", __func__, line);
665 spin_lock(&proc->inner_lock);
666 }
667
668 /**
669 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
670 * @proc: struct binder_proc to release
671 *
672 * Release lock acquired via binder_inner_proc_lock()
673 */
674 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
675 static void
676 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
677 {
678 binder_debug(BINDER_DEBUG_SPINLOCKS,
679 "%s: line=%d\n", __func__, line);
680 spin_unlock(&proc->inner_lock);
681 }
682
683 /**
684 * binder_node_lock() - Acquire spinlock for given binder_node
685 * @node: struct binder_node to acquire
686 *
687 * Acquires node->lock. Used to protect binder_node fields
688 */
689 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
690 static void
691 _binder_node_lock(struct binder_node *node, int line)
692 {
693 binder_debug(BINDER_DEBUG_SPINLOCKS,
694 "%s: line=%d\n", __func__, line);
695 spin_lock(&node->lock);
696 }
697
698 /**
699 * binder_node_unlock() - Release spinlock for given binder_node
700 * @node: struct binder_node to release
701 *
702 * Release lock acquired via binder_node_lock()
703 */
704 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
705 static void
706 _binder_node_unlock(struct binder_node *node, int line)
707 {
708 binder_debug(BINDER_DEBUG_SPINLOCKS,
709 "%s: line=%d\n", __func__, line);
710 spin_unlock(&node->lock);
711 }
712
713 /**
714 * binder_node_inner_lock() - Acquire node and inner locks
715 * @node: struct binder_node to acquire
716 *
717 * Acquires node->lock. If node->proc is non-NULL, also acquires
718 * proc->inner_lock. Used to protect binder_node fields
719 */
720 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
721 static void
722 _binder_node_inner_lock(struct binder_node *node, int line)
723 {
724 binder_debug(BINDER_DEBUG_SPINLOCKS,
725 "%s: line=%d\n", __func__, line);
726 spin_lock(&node->lock);
727 if (node->proc)
728 binder_inner_proc_lock(node->proc);
729 }
730
731 /**
732 * binder_node_inner_unlock() - Release node and inner locks
733 * @node: struct binder_node to release
734 *
735 * Release locks acquired via binder_node_inner_lock()
736 */
737 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
738 static void
739 _binder_node_inner_unlock(struct binder_node *node, int line)
740 {
741 struct binder_proc *proc = node->proc;
742
743 binder_debug(BINDER_DEBUG_SPINLOCKS,
744 "%s: line=%d\n", __func__, line);
745 if (proc)
746 binder_inner_proc_unlock(proc);
747 spin_unlock(&node->lock);
748 }
749
750 static bool binder_worklist_empty_ilocked(struct list_head *list)
751 {
752 return list_empty(list);
753 }
754
755 /**
756 * binder_worklist_empty() - Check if no items on the work list
757 * @proc: binder_proc associated with list
758 * @list: list to check
759 *
760 * Return: true if there are no items on list, else false
761 */
762 static bool binder_worklist_empty(struct binder_proc *proc,
763 struct list_head *list)
764 {
765 bool ret;
766
767 binder_inner_proc_lock(proc);
768 ret = binder_worklist_empty_ilocked(list);
769 binder_inner_proc_unlock(proc);
770 return ret;
771 }
772
773 static void
774 binder_enqueue_work_ilocked(struct binder_work *work,
775 struct list_head *target_list)
776 {
777 BUG_ON(target_list == NULL);
778 BUG_ON(work->entry.next && !list_empty(&work->entry));
779 list_add_tail(&work->entry, target_list);
780 }
781
782 /**
783 * binder_enqueue_work() - Add an item to the work list
784 * @proc: binder_proc associated with list
785 * @work: struct binder_work to add to list
786 * @target_list: list to add work to
787 *
788 * Adds the work to the specified list. Asserts that work
789 * is not already on a list.
790 */
791 static void
792 binder_enqueue_work(struct binder_proc *proc,
793 struct binder_work *work,
794 struct list_head *target_list)
795 {
796 binder_inner_proc_lock(proc);
797 binder_enqueue_work_ilocked(work, target_list);
798 binder_inner_proc_unlock(proc);
799 }
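
/*
 * Typical pattern (sketch): work queued on a thread's todo list is
 * followed by a wakeup of that thread, as binder_send_failed_reply()
 * below does for reply errors:
 *
 *	binder_enqueue_work(thread->proc, &thread->reply_error.work,
 *			    &thread->todo);
 *	wake_up_interruptible(&thread->wait);
 */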
800
801 static void
802 binder_dequeue_work_ilocked(struct binder_work *work)
803 {
804 list_del_init(&work->entry);
805 }
806
807 /**
808 * binder_dequeue_work() - Removes an item from the work list
809 * @proc: binder_proc associated with list
810 * @work: struct binder_work to remove from list
811 *
812 * Removes the specified work item from whatever list it is on.
813 * Can safely be called if work is not on any list.
814 */
815 static void
816 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
817 {
818 binder_inner_proc_lock(proc);
819 binder_dequeue_work_ilocked(work);
820 binder_inner_proc_unlock(proc);
821 }
822
823 static struct binder_work *binder_dequeue_work_head_ilocked(
824 struct list_head *list)
825 {
826 struct binder_work *w;
827
828 w = list_first_entry_or_null(list, struct binder_work, entry);
829 if (w)
830 list_del_init(&w->entry);
831 return w;
832 }
833
834 /**
835 * binder_dequeue_work_head() - Dequeues the item at head of list
836 * @proc: binder_proc associated with list
837 * @list: list to dequeue head
838 *
839 * Removes the head of the list if there are items on the list
840 *
841 * Return: pointer to the dequeued binder_work, or NULL if list was empty
842 */
843 static struct binder_work *binder_dequeue_work_head(
844 struct binder_proc *proc,
845 struct list_head *list)
846 {
847 struct binder_work *w;
848
849 binder_inner_proc_lock(proc);
850 w = binder_dequeue_work_head_ilocked(list);
851 binder_inner_proc_unlock(proc);
852 return w;
853 }
854
855 static void
856 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
857 static void binder_free_thread(struct binder_thread *thread);
858 static void binder_free_proc(struct binder_proc *proc);
859 static void binder_inc_node_tmpref(struct binder_node *node);
860
861 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
862 {
863 struct files_struct *files = proc->files;
864 unsigned long rlim_cur;
865 unsigned long irqs;
866
867 if (files == NULL)
868 return -ESRCH;
869
870 if (!lock_task_sighand(proc->tsk, &irqs))
871 return -EMFILE;
872
873 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
874 unlock_task_sighand(proc->tsk, &irqs);
875
876 return __alloc_fd(files, 0, rlim_cur, flags);
877 }
878
879 /*
880 * copied from fd_install
881 */
882 static void task_fd_install(
883 struct binder_proc *proc, unsigned int fd, struct file *file)
884 {
885 if (proc->files)
886 __fd_install(proc->files, fd, file);
887 }
888
889 /*
890 * copied from sys_close
891 */
892 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
893 {
894 int retval;
895
896 if (proc->files == NULL)
897 return -ESRCH;
898
899 retval = __close_fd(proc->files, fd);
900 /* can't restart close syscall because file table entry was cleared */
901 if (unlikely(retval == -ERESTARTSYS ||
902 retval == -ERESTARTNOINTR ||
903 retval == -ERESTARTNOHAND ||
904 retval == -ERESTART_RESTARTBLOCK))
905 retval = -EINTR;
906
907 return retval;
908 }
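
/*
 * Usage sketch (the real call sites are in the transaction code later
 * in this file): installing a file descriptor into the target process
 * combines these helpers roughly as
 *
 *	fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *	if (fd >= 0)
 *		task_fd_install(target_proc, fd, file);
 *	...
 *	task_close_fd(target_proc, fd);	 on a later error/free path
 */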
909
910 static inline void binder_lock(const char *tag)
911 {
912 trace_binder_lock(tag);
913 mutex_lock(&binder_main_lock);
914 trace_binder_locked(tag);
915 }
916
917 static inline void binder_unlock(const char *tag)
918 {
919 trace_binder_unlock(tag);
920 mutex_unlock(&binder_main_lock);
921 }
922
923 static void binder_set_nice(long nice)
924 {
925 long min_nice;
926
927 if (can_nice(current, nice)) {
928 set_user_nice(current, nice);
929 return;
930 }
931 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
932 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
933 "%d: nice value %ld not allowed use %ld instead\n",
934 current->pid, nice, min_nice);
935 set_user_nice(current, min_nice);
936 if (min_nice <= MAX_NICE)
937 return;
938 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
939 }
940
941 static struct binder_node *binder_get_node(struct binder_proc *proc,
942 binder_uintptr_t ptr)
943 {
944 struct rb_node *n = proc->nodes.rb_node;
945 struct binder_node *node;
946
947 while (n) {
948 node = rb_entry(n, struct binder_node, rb_node);
949
950 if (ptr < node->ptr)
951 n = n->rb_left;
952 else if (ptr > node->ptr)
953 n = n->rb_right;
954 else {
955 /*
956 * take an implicit weak reference
957 * to ensure node stays alive until
958 * call to binder_put_node()
959 */
960 binder_inc_node_tmpref(node);
961 return node;
962 }
963 }
964 return NULL;
965 }
966
967 static struct binder_node *binder_new_node(struct binder_proc *proc,
968 struct flat_binder_object *fp)
969 {
970 struct rb_node **p = &proc->nodes.rb_node;
971 struct rb_node *parent = NULL;
972 struct binder_node *node;
973 binder_uintptr_t ptr = fp ? fp->binder : 0;
974 binder_uintptr_t cookie = fp ? fp->cookie : 0;
975 __u32 flags = fp ? fp->flags : 0;
976
977 while (*p) {
978 parent = *p;
979 node = rb_entry(parent, struct binder_node, rb_node);
980
981 if (ptr < node->ptr)
982 p = &(*p)->rb_left;
983 else if (ptr > node->ptr)
984 p = &(*p)->rb_right;
985 else
986 return NULL;
987 }
988
989 node = kzalloc(sizeof(*node), GFP_KERNEL);
990 if (node == NULL)
991 return NULL;
992 binder_stats_created(BINDER_STAT_NODE);
993 node->tmp_refs++;
994 rb_link_node(&node->rb_node, parent, p);
995 rb_insert_color(&node->rb_node, &proc->nodes);
996 node->debug_id = atomic_inc_return(&binder_last_id);
997 node->proc = proc;
998 node->ptr = ptr;
999 node->cookie = cookie;
1000 node->work.type = BINDER_WORK_NODE;
1001 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1002 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1003 spin_lock_init(&node->lock);
1004 INIT_LIST_HEAD(&node->work.entry);
1005 INIT_LIST_HEAD(&node->async_todo);
1006 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1007 "%d:%d node %d u%016llx c%016llx created\n",
1008 proc->pid, current->pid, node->debug_id,
1009 (u64)node->ptr, (u64)node->cookie);
1010 return node;
1011 }
1012
1013 static void binder_free_node(struct binder_node *node)
1014 {
1015 kfree(node);
1016 binder_stats_deleted(BINDER_STAT_NODE);
1017 }
1018
1019 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1020 int internal,
1021 struct list_head *target_list)
1022 {
1023 struct binder_proc *proc = node->proc;
1024
1025 BUG_ON(!spin_is_locked(&node->lock));
1026 if (proc)
1027 BUG_ON(!spin_is_locked(&proc->inner_lock));
1028 if (strong) {
1029 if (internal) {
1030 if (target_list == NULL &&
1031 node->internal_strong_refs == 0 &&
1032 !(node->proc &&
1033 node == node->proc->context->binder_context_mgr_node &&
1034 node->has_strong_ref)) {
1035 pr_err("invalid inc strong node for %d\n",
1036 node->debug_id);
1037 return -EINVAL;
1038 }
1039 node->internal_strong_refs++;
1040 } else
1041 node->local_strong_refs++;
1042 if (!node->has_strong_ref && target_list) {
1043 binder_dequeue_work_ilocked(&node->work);
1044 binder_enqueue_work_ilocked(&node->work, target_list);
1045 }
1046 } else {
1047 if (!internal)
1048 node->local_weak_refs++;
1049 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1050 if (target_list == NULL) {
1051 pr_err("invalid inc weak node for %d\n",
1052 node->debug_id);
1053 return -EINVAL;
1054 }
1055 binder_enqueue_work_ilocked(&node->work, target_list);
1056 }
1057 }
1058 return 0;
1059 }
1060
1061 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1062 struct list_head *target_list)
1063 {
1064 int ret;
1065
1066 binder_node_inner_lock(node);
1067 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1068 binder_node_inner_unlock(node);
1069
1070 return ret;
1071 }
1072
1073 static bool binder_dec_node_nilocked(struct binder_node *node,
1074 int strong, int internal)
1075 {
1076 struct binder_proc *proc = node->proc;
1077
1078 BUG_ON(!spin_is_locked(&node->lock));
1079 if (proc)
1080 BUG_ON(!spin_is_locked(&proc->inner_lock));
1081 if (strong) {
1082 if (internal)
1083 node->internal_strong_refs--;
1084 else
1085 node->local_strong_refs--;
1086 if (node->local_strong_refs || node->internal_strong_refs)
1087 return false;
1088 } else {
1089 if (!internal)
1090 node->local_weak_refs--;
1091 if (node->local_weak_refs || node->tmp_refs ||
1092 !hlist_empty(&node->refs))
1093 return false;
1094 }
1095
1096 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1097 if (list_empty(&node->work.entry)) {
1098 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1099 wake_up_interruptible(&node->proc->wait);
1100 }
1101 } else {
1102 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1103 !node->local_weak_refs && !node->tmp_refs) {
1104 if (proc) {
1105 binder_dequeue_work_ilocked(&node->work);
1106 rb_erase(&node->rb_node, &proc->nodes);
1107 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1108 "refless node %d deleted\n",
1109 node->debug_id);
1110 } else {
1111 BUG_ON(!list_empty(&node->work.entry));
1112 spin_lock(&binder_dead_nodes_lock);
1113 /*
1114 * tmp_refs could have changed so
1115 * check it again
1116 */
1117 if (node->tmp_refs) {
1118 spin_unlock(&binder_dead_nodes_lock);
1119 return false;
1120 }
1121 hlist_del(&node->dead_node);
1122 spin_unlock(&binder_dead_nodes_lock);
1123 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1124 "dead node %d deleted\n",
1125 node->debug_id);
1126 }
1127 return true;
1128 }
1129 }
1130 return false;
1131 }
1132
1133 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1134 {
1135 bool free_node;
1136
1137 binder_node_inner_lock(node);
1138 free_node = binder_dec_node_nilocked(node, strong, internal);
1139 binder_node_inner_unlock(node);
1140 if (free_node)
1141 binder_free_node(node);
1142 }
1143
1144 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1145 {
1146 /*
1147 * No call to binder_inc_node() is needed since we
1148 * don't need to inform userspace of any changes to
1149 * tmp_refs
1150 */
1151 node->tmp_refs++;
1152 }
1153
1154 /**
1155 * binder_inc_node_tmpref() - take a temporary reference on node
1156 * @node: node to reference
1157 *
1158 * Take reference on node to prevent the node from being freed
1159 * while referenced only by a local variable. The inner lock is
1160 * needed to serialize with the node work on the queue (which
1161 * isn't needed after the node is dead). If the node is dead
1162 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1163 * node->tmp_refs against dead-node-only cases where the node
1164 * lock cannot be acquired (eg traversing the dead node list to
1165 * print nodes)
1166 */
1167 static void binder_inc_node_tmpref(struct binder_node *node)
1168 {
1169 binder_node_lock(node);
1170 if (node->proc)
1171 binder_inner_proc_lock(node->proc);
1172 else
1173 spin_lock(&binder_dead_nodes_lock);
1174 binder_inc_node_tmpref_ilocked(node);
1175 if (node->proc)
1176 binder_inner_proc_unlock(node->proc);
1177 else
1178 spin_unlock(&binder_dead_nodes_lock);
1179 binder_node_unlock(node);
1180 }
1181
1182 /**
1183 * binder_dec_node_tmpref() - remove a temporary reference on node
1184 * @node: node to reference
1185 *
1186 * Release temporary reference on node taken via binder_inc_node_tmpref()
1187 */
1188 static void binder_dec_node_tmpref(struct binder_node *node)
1189 {
1190 bool free_node;
1191
1192 binder_node_inner_lock(node);
1193 if (!node->proc)
1194 spin_lock(&binder_dead_nodes_lock);
1195 node->tmp_refs--;
1196 BUG_ON(node->tmp_refs < 0);
1197 if (!node->proc)
1198 spin_unlock(&binder_dead_nodes_lock);
1199 /*
1200 * Call binder_dec_node() to check if all refcounts are 0
1201 * and cleanup is needed. Calling with strong=0 and internal=1
1202 * causes no actual reference to be released in binder_dec_node().
1203 * If that changes, a change is needed here too.
1204 */
1205 free_node = binder_dec_node_nilocked(node, 0, 1);
1206 binder_node_inner_unlock(node);
1207 if (free_node)
1208 binder_free_node(node);
1209 }
1210
1211 static void binder_put_node(struct binder_node *node)
1212 {
1213 binder_dec_node_tmpref(node);
1214 }
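
/*
 * Usage sketch: the temporary-reference pattern used throughout this
 * file. binder_get_node() takes an implicit tmp ref on the returned
 * node, which the caller drops with binder_put_node() once done:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... node cannot be freed here ...
 *		binder_put_node(node);
 *	}
 */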
1215
1216 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1217 u32 desc, bool need_strong_ref)
1218 {
1219 struct rb_node *n = proc->refs_by_desc.rb_node;
1220 struct binder_ref *ref;
1221
1222 while (n) {
1223 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1224
1225 if (desc < ref->data.desc) {
1226 n = n->rb_left;
1227 } else if (desc > ref->data.desc) {
1228 n = n->rb_right;
1229 } else if (need_strong_ref && !ref->data.strong) {
1230 binder_user_error("tried to use weak ref as strong ref\n");
1231 return NULL;
1232 } else {
1233 return ref;
1234 }
1235 }
1236 return NULL;
1237 }
1238
1239 /**
1240 * binder_get_ref_for_node() - get the ref associated with given node
1241 * @proc: binder_proc that owns the ref
1242 * @node: binder_node of target
1243 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1244 *
1245 * Look up the ref for the given node and return it if it exists
1246 *
1247 * If it doesn't exist and the caller provides a newly allocated
1248 * ref, initialize the fields of the newly allocated ref and insert
1249 * into the given proc rb_trees and node refs list.
1250 *
1251 * Return: the ref for node. It is possible that another thread
1252 * allocated/initialized the ref first in which case the
1253 * returned ref would be different than the passed-in
1254 * new_ref. new_ref must be kfree'd by the caller in
1255 * this case.
1256 */
1257 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1258 struct binder_node *node,
1259 struct binder_ref *new_ref)
1260 {
1261 struct binder_context *context = proc->context;
1262 struct rb_node **p = &proc->refs_by_node.rb_node;
1263 struct rb_node *parent = NULL;
1264 struct binder_ref *ref;
1265 struct rb_node *n;
1266
1267 while (*p) {
1268 parent = *p;
1269 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1270
1271 if (node < ref->node)
1272 p = &(*p)->rb_left;
1273 else if (node > ref->node)
1274 p = &(*p)->rb_right;
1275 else
1276 return ref;
1277 }
1278 if (!new_ref)
1279 return NULL;
1280
1281 binder_stats_created(BINDER_STAT_REF);
1282 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1283 new_ref->proc = proc;
1284 new_ref->node = node;
1285 rb_link_node(&new_ref->rb_node_node, parent, p);
1286 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1287
1288 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1289 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1290 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1291 if (ref->data.desc > new_ref->data.desc)
1292 break;
1293 new_ref->data.desc = ref->data.desc + 1;
1294 }
1295
1296 p = &proc->refs_by_desc.rb_node;
1297 while (*p) {
1298 parent = *p;
1299 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1300
1301 if (new_ref->data.desc < ref->data.desc)
1302 p = &(*p)->rb_left;
1303 else if (new_ref->data.desc > ref->data.desc)
1304 p = &(*p)->rb_right;
1305 else
1306 BUG();
1307 }
1308 rb_link_node(&new_ref->rb_node_desc, parent, p);
1309 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1310
1311 binder_node_lock(node);
1312 hlist_add_head(&new_ref->node_entry, &node->refs);
1313
1314 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1315 "%d new ref %d desc %d for node %d\n",
1316 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1317 node->debug_id);
1318 binder_node_unlock(node);
1319 return new_ref;
1320 }
1321
1322 static void binder_cleanup_ref(struct binder_ref *ref)
1323 {
1324 bool delete_node = false;
1325
1326 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1327 "%d delete ref %d desc %d for node %d\n",
1328 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1329 ref->node->debug_id);
1330
1331 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1332 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1333
1334 binder_node_inner_lock(ref->node);
1335 if (ref->data.strong)
1336 binder_dec_node_nilocked(ref->node, 1, 1);
1337
1338 hlist_del(&ref->node_entry);
1339 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1340 binder_node_inner_unlock(ref->node);
1341 /*
1342 * Clear ref->node unless we want the caller to free the node
1343 */
1344 if (!delete_node) {
1345 /*
1346 * The caller uses ref->node to determine
1347 * whether the node needs to be freed. Clear
1348 * it since the node is still alive.
1349 */
1350 ref->node = NULL;
1351 }
1352
1353 if (ref->death) {
1354 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1355 "%d delete ref %d desc %d has death notification\n",
1356 ref->proc->pid, ref->data.debug_id,
1357 ref->data.desc);
1358 binder_dequeue_work(ref->proc, &ref->death->work);
1359 binder_stats_deleted(BINDER_STAT_DEATH);
1360 }
1361 binder_stats_deleted(BINDER_STAT_REF);
1362 }
1363
1364 /**
1365 * binder_inc_ref() - increment the ref for given handle
1366 * @ref: ref to be incremented
1367 * @strong: if true, strong increment, else weak
1368 * @target_list: list to queue node work on
1369 *
1370 * Increment the ref.
1371 *
1372 * Return: 0, if successful, else errno
1373 */
1374 static int binder_inc_ref(struct binder_ref *ref, int strong,
1375 struct list_head *target_list)
1376 {
1377 int ret;
1378
1379 if (strong) {
1380 if (ref->data.strong == 0) {
1381 ret = binder_inc_node(ref->node, 1, 1, target_list);
1382 if (ret)
1383 return ret;
1384 }
1385 ref->data.strong++;
1386 } else {
1387 if (ref->data.weak == 0) {
1388 ret = binder_inc_node(ref->node, 0, 1, target_list);
1389 if (ret)
1390 return ret;
1391 }
1392 ref->data.weak++;
1393 }
1394 return 0;
1395 }
1396
1397 /**
1398 * binder_dec_ref() - dec the ref for given handle
1399 * @ref: ref to be decremented
1400 * @strong: if true, strong decrement, else weak
1401 *
1402 * Decrement the ref.
1403 *
1404 * TODO: kfree is avoided here since an upcoming patch
1405 * will put this under a lock.
1406 *
1407 * Return: true if ref is cleaned up and ready to be freed
1408 */
1409 static bool binder_dec_ref(struct binder_ref *ref, int strong)
1410 {
1411 if (strong) {
1412 if (ref->data.strong == 0) {
1413 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1414 ref->proc->pid, ref->data.debug_id,
1415 ref->data.desc, ref->data.strong,
1416 ref->data.weak);
1417 return false;
1418 }
1419 ref->data.strong--;
1420 if (ref->data.strong == 0)
1421 binder_dec_node(ref->node, strong, 1);
1422 } else {
1423 if (ref->data.weak == 0) {
1424 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1425 ref->proc->pid, ref->data.debug_id,
1426 ref->data.desc, ref->data.strong,
1427 ref->data.weak);
1428 return false;
1429 }
1430 ref->data.weak--;
1431 }
1432 if (ref->data.strong == 0 && ref->data.weak == 0) {
1433 binder_cleanup_ref(ref);
1434 /*
1435 * TODO: we could kfree(ref) here, but an upcoming
1436 * patch will call this with a lock held, so we
1437 * return an indication that the ref should be
1438 * freed.
1439 */
1440 return true;
1441 }
1442 return false;
1443 }
1444
1445 /**
1446 * binder_get_node_from_ref() - get the node from the given proc/desc
1447 * @proc: proc containing the ref
1448 * @desc: the handle associated with the ref
1449 * @need_strong_ref: if true, only return node if ref is strong
1450 * @rdata: the id/refcount data for the ref
1451 *
1452 * Given a proc and ref handle, return the associated binder_node
1453 *
1454 * Return: a binder_node or NULL if not found or not strong when strong required
1455 */
1456 static struct binder_node *binder_get_node_from_ref(
1457 struct binder_proc *proc,
1458 u32 desc, bool need_strong_ref,
1459 struct binder_ref_data *rdata)
1460 {
1461 struct binder_node *node;
1462 struct binder_ref *ref;
1463
1464 ref = binder_get_ref(proc, desc, need_strong_ref);
1465 if (!ref)
1466 goto err_no_ref;
1467 node = ref->node;
1468 /*
1469 * Take an implicit reference on the node to ensure
1470 * it stays alive until the call to binder_put_node()
1471 */
1472 binder_inc_node_tmpref(node);
1473 if (rdata)
1474 *rdata = ref->data;
1475
1476 return node;
1477
1478 err_no_ref:
1479 return NULL;
1480 }
1481
1482 /**
1483 * binder_free_ref() - free the binder_ref
1484 * @ref: ref to free
1485 *
1486 * Free the binder_ref. Free the binder_node indicated by ref->node
1487 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1488 */
1489 static void binder_free_ref(struct binder_ref *ref)
1490 {
1491 if (ref->node)
1492 binder_free_node(ref->node);
1493 kfree(ref->death);
1494 kfree(ref);
1495 }
1496
1497 /**
1498 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1499 * @proc: proc containing the ref
1500 * @desc: the handle associated with the ref
1501 * @increment: true=inc reference, false=dec reference
1502 * @strong: true=strong reference, false=weak reference
1503 * @rdata: the id/refcount data for the ref
1504 *
1505 * Given a proc and ref handle, increment or decrement the ref
1506 * according to "increment" arg.
1507 *
1508 * Return: 0 if successful, else errno
1509 */
1510 static int binder_update_ref_for_handle(struct binder_proc *proc,
1511 uint32_t desc, bool increment, bool strong,
1512 struct binder_ref_data *rdata)
1513 {
1514 int ret = 0;
1515 struct binder_ref *ref;
1516 bool delete_ref = false;
1517
1518 ref = binder_get_ref(proc, desc, strong);
1519 if (!ref) {
1520 ret = -EINVAL;
1521 goto err_no_ref;
1522 }
1523 if (increment)
1524 ret = binder_inc_ref(ref, strong, NULL);
1525 else
1526 delete_ref = binder_dec_ref(ref, strong);
1527
1528 if (rdata)
1529 *rdata = ref->data;
1530
1531 if (delete_ref)
1532 binder_free_ref(ref);
1533 return ret;
1534
1535 err_no_ref:
1536 return ret;
1537 }
1538
1539 /**
1540 * binder_dec_ref_for_handle() - dec the ref for given handle
1541 * @proc: proc containing the ref
1542 * @desc: the handle associated with the ref
1543 * @strong: true=strong reference, false=weak reference
1544 * @rdata: the id/refcount data for the ref
1545 *
1546 * Just calls binder_update_ref_for_handle() to decrement the ref.
1547 *
1548 * Return: 0 if successful, else errno
1549 */
1550 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1551 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1552 {
1553 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1554 }
1555
1556
1557 /**
1558 * binder_inc_ref_for_node() - increment the ref for given proc/node
1559 * @proc: proc containing the ref
1560 * @node: target node
1561 * @strong: true=strong reference, false=weak reference
1562 * @target_list: worklist to use if node is incremented
1563 * @rdata: the id/refcount data for the ref
1564 *
1565 * Given a proc and node, increment the ref. Create the ref if it
1566 * doesn't already exist
1567 *
1568 * Return: 0 if successful, else errno
1569 */
1570 static int binder_inc_ref_for_node(struct binder_proc *proc,
1571 struct binder_node *node,
1572 bool strong,
1573 struct list_head *target_list,
1574 struct binder_ref_data *rdata)
1575 {
1576 struct binder_ref *ref;
1577 struct binder_ref *new_ref = NULL;
1578 int ret = 0;
1579
1580 ref = binder_get_ref_for_node(proc, node, NULL);
1581 if (!ref) {
1582 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1583 if (!new_ref)
1584 return -ENOMEM;
1585 ref = binder_get_ref_for_node(proc, node, new_ref);
1586 }
1587 ret = binder_inc_ref(ref, strong, target_list);
1588 *rdata = ref->data;
1589 if (new_ref && ref != new_ref)
1590 /*
1591 * Another thread created the ref first so
1592 * free the one we allocated
1593 */
1594 kfree(new_ref);
1595 return ret;
1596 }
1597
1598 static void binder_pop_transaction(struct binder_thread *target_thread,
1599 struct binder_transaction *t)
1600 {
1601 BUG_ON(!target_thread);
1602 BUG_ON(target_thread->transaction_stack != t);
1603 BUG_ON(target_thread->transaction_stack->from != target_thread);
1604 target_thread->transaction_stack =
1605 target_thread->transaction_stack->from_parent;
1606 t->from = NULL;
1607 }
1608
1609 /**
1610 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1611 * @thread: thread to decrement
1612 *
1613 * A thread needs to be kept alive while being used to create or
1614 * handle a transaction. binder_get_txn_from() is used to safely
1615 * extract t->from from a binder_transaction and keep the thread
1616 * indicated by t->from from being freed. When done with that
1617 * binder_thread, this function is called to decrement the
1618 * tmp_ref and free if appropriate (thread has been released
1619 * and no transaction being processed by the driver)
1620 */
1621 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1622 {
1623 /*
1624 * atomic is used to protect the counter value while
1625 * it cannot reach zero or thread->is_dead is false
1626 *
1627 * TODO: future patch adds locking to ensure that the
1628 * check of tmp_ref and is_dead is done with a lock held
1629 */
1630 atomic_dec(&thread->tmp_ref);
1631 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1632 binder_free_thread(thread);
1633 return;
1634 }
1635 }
1636
1637 /**
1638 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1639 * @proc: proc to decrement
1640 *
1641 * A binder_proc needs to be kept alive while being used to create or
1642 * handle a transaction. proc->tmp_ref is incremented when
1643 * creating a new transaction or the binder_proc is currently in-use
1644 * by threads that are being released. When done with the binder_proc,
1645 * this function is called to decrement the counter and free the
1646 * proc if appropriate (proc has been released, all threads have
1647 * been released and not currently in use to process a transaction).
1648 */
1649 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1650 {
1651 proc->tmp_ref--;
1652 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1653 !proc->tmp_ref) {
1654 binder_free_proc(proc);
1655 return;
1656 }
1657 }
1658
1659 /**
1660 * binder_get_txn_from() - safely extract the "from" thread in transaction
1661 * @t: binder transaction for t->from
1662 *
1663 * Atomically return the "from" thread and increment the tmp_ref
1664 * count for the thread to ensure it stays alive until
1665 * binder_thread_dec_tmpref() is called.
1666 *
1667 * Return: the value of t->from
1668 */
1669 static struct binder_thread *binder_get_txn_from(
1670 struct binder_transaction *t)
1671 {
1672 struct binder_thread *from;
1673
1674 spin_lock(&t->lock);
1675 from = t->from;
1676 if (from)
1677 atomic_inc(&from->tmp_ref);
1678 spin_unlock(&t->lock);
1679 return from;
1680 }
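
/*
 * Usage sketch: callers pair binder_get_txn_from() with
 * binder_thread_dec_tmpref() once they are done with the thread, as
 * binder_send_failed_reply() below does:
 *
 *	target_thread = binder_get_txn_from(t);
 *	if (target_thread) {
 *		... use target_thread; it cannot be freed here ...
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */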
1681
1682 static void binder_free_transaction(struct binder_transaction *t)
1683 {
1684 if (t->buffer)
1685 t->buffer->transaction = NULL;
1686 kfree(t);
1687 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1688 }
1689
1690 static void binder_send_failed_reply(struct binder_transaction *t,
1691 uint32_t error_code)
1692 {
1693 struct binder_thread *target_thread;
1694 struct binder_transaction *next;
1695
1696 BUG_ON(t->flags & TF_ONE_WAY);
1697 while (1) {
1698 target_thread = binder_get_txn_from(t);
1699 if (target_thread) {
1700 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1701 "send failed reply for transaction %d to %d:%d\n",
1702 t->debug_id,
1703 target_thread->proc->pid,
1704 target_thread->pid);
1705
1706 binder_pop_transaction(target_thread, t);
1707 if (target_thread->reply_error.cmd == BR_OK) {
1708 target_thread->reply_error.cmd = error_code;
1709 binder_enqueue_work(
1710 target_thread->proc,
1711 &target_thread->reply_error.work,
1712 &target_thread->todo);
1713 wake_up_interruptible(&target_thread->wait);
1714 } else {
1715 WARN(1, "Unexpected reply error: %u\n",
1716 target_thread->reply_error.cmd);
1717 }
1718 binder_thread_dec_tmpref(target_thread);
1719 binder_free_transaction(t);
1720 return;
1721 }
1722 next = t->from_parent;
1723
1724 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1725 "send failed reply for transaction %d, target dead\n",
1726 t->debug_id);
1727
1728 binder_free_transaction(t);
1729 if (next == NULL) {
1730 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1731 "reply failed, no target thread at root\n");
1732 return;
1733 }
1734 t = next;
1735 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1736 "reply failed, no target thread -- retry %d\n",
1737 t->debug_id);
1738 }
1739 }
1740
1741 /**
1742 * binder_validate_object() - checks for a valid metadata object in a buffer.
1743 * @buffer: binder_buffer that we're parsing.
1744 * @offset: offset in the buffer at which to validate an object.
1745 *
1746 * Return: If there's a valid metadata object at @offset in @buffer, the
1747 * size of that object. Otherwise, it returns zero.
1748 */
1749 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1750 {
1751 /* Check if we can read a header first */
1752 struct binder_object_header *hdr;
1753 size_t object_size = 0;
1754
1755 if (offset > buffer->data_size - sizeof(*hdr) ||
1756 buffer->data_size < sizeof(*hdr) ||
1757 !IS_ALIGNED(offset, sizeof(u32)))
1758 return 0;
1759
1760 /* Ok, now see if we can read a complete object. */
1761 hdr = (struct binder_object_header *)(buffer->data + offset);
1762 switch (hdr->type) {
1763 case BINDER_TYPE_BINDER:
1764 case BINDER_TYPE_WEAK_BINDER:
1765 case BINDER_TYPE_HANDLE:
1766 case BINDER_TYPE_WEAK_HANDLE:
1767 object_size = sizeof(struct flat_binder_object);
1768 break;
1769 case BINDER_TYPE_FD:
1770 object_size = sizeof(struct binder_fd_object);
1771 break;
1772 case BINDER_TYPE_PTR:
1773 object_size = sizeof(struct binder_buffer_object);
1774 break;
1775 case BINDER_TYPE_FDA:
1776 object_size = sizeof(struct binder_fd_array_object);
1777 break;
1778 default:
1779 return 0;
1780 }
1781 if (offset <= buffer->data_size - object_size &&
1782 buffer->data_size >= object_size)
1783 return object_size;
1784 else
1785 return 0;
1786 }
1787
1788 /**
1789 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1790 * @b: binder_buffer containing the object
1791 * @index: index in offset array at which the binder_buffer_object is
1792 * located
1793 * @start: points to the start of the offset array
1794 * @num_valid: the number of valid offsets in the offset array
1795 *
1796 * Return: If @index is within the valid range of the offset array
1797 * described by @start and @num_valid, and if there's a valid
1798 * binder_buffer_object at the offset found in index @index
1799 * of the offset array, that object is returned. Otherwise,
1800 * %NULL is returned.
1801 * Note that the offset found in index @index itself is not
1802 * verified; this function assumes that @num_valid elements
1803 * from @start were previously verified to have valid offsets.
1804 */
1805 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1806 binder_size_t index,
1807 binder_size_t *start,
1808 binder_size_t num_valid)
1809 {
1810 struct binder_buffer_object *buffer_obj;
1811 binder_size_t *offp;
1812
1813 if (index >= num_valid)
1814 return NULL;
1815
1816 offp = start + index;
1817 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1818 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1819 return NULL;
1820
1821 return buffer_obj;
1822 }
1823
1824 /**
1825 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1826 * @b: transaction buffer
1827 * @objects_start: start of objects buffer
1828 * @buffer: binder_buffer_object in which to fix up
1829 * @offset: start offset in @buffer to fix up
1830 * @last_obj: last binder_buffer_object that we fixed up in
1831 * @last_min_offset: minimum fixup offset in @last_obj
1832 *
1833 * Return: %true if a fixup in buffer @buffer at offset @offset is
1834 * allowed.
1835 *
1836 * For safety reasons, we only allow fixups inside a buffer to happen
1837 * at increasing offsets; additionally, we only allow fixup on the last
1838 * buffer object that was verified, or one of its parents.
1839 *
1840 * Example of what is allowed:
1841 *
1842 * A
1843 * B (parent = A, offset = 0)
1844 * C (parent = A, offset = 16)
1845 * D (parent = C, offset = 0)
1846 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1847 *
1848 * Examples of what is not allowed:
1849 *
1850 * Decreasing offsets within the same parent:
1851 * A
1852 * C (parent = A, offset = 16)
1853 * B (parent = A, offset = 0) // decreasing offset within A
1854 *
1855 * Referring to a parent that wasn't the last object or any of its parents:
1856 * A
1857 * B (parent = A, offset = 0)
1858 * C (parent = A, offset = 0)
1859 * C (parent = A, offset = 16)
1860 * D (parent = B, offset = 0) // B is not the last object (C) or any of C's parents
1861 */
1862 static bool binder_validate_fixup(struct binder_buffer *b,
1863 binder_size_t *objects_start,
1864 struct binder_buffer_object *buffer,
1865 binder_size_t fixup_offset,
1866 struct binder_buffer_object *last_obj,
1867 binder_size_t last_min_offset)
1868 {
1869 if (!last_obj) {
1870 /* No previously verified object to fix up in */
1871 return false;
1872 }
1873
1874 while (last_obj != buffer) {
1875 /*
1876 * Safe to retrieve the parent of last_obj, since it
1877 * was already previously verified by the driver.
1878 */
1879 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1880 return false;
1881 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1882 last_obj = (struct binder_buffer_object *)
1883 (b->data + *(objects_start + last_obj->parent));
1884 }
1885 return (fixup_offset >= last_min_offset);
1886 }
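/*
 * Editor's sketch: a userspace view of the "allowed" example above
 * (B fixed up at offset 0 of A, then C at offset 16 of A), showing how
 * BINDER_TYPE_PTR objects are laid out so the fixups land at increasing
 * offsets. Apart from the uapi types, the helper and its arguments are
 * invented for illustration; the objects would appear in this order in
 * the transaction's offsets array.
 */
#if 0	/* illustration only -- not compiled into the driver */
#include <stddef.h>
#include <stdint.h>
#include <linux/android/binder.h>

static void fill_sg_objects(struct binder_buffer_object obj[3],
			    const void *a, size_t a_len,
			    const void *b, size_t b_len,
			    const void *c, size_t c_len)
{
	/* A: root buffer, no parent */
	obj[0].hdr.type = BINDER_TYPE_PTR;
	obj[0].flags = 0;
	obj[0].buffer = (binder_uintptr_t)(uintptr_t)a;
	obj[0].length = a_len;

	/* B: its pointer lives at offset 0 inside A */
	obj[1].hdr.type = BINDER_TYPE_PTR;
	obj[1].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	obj[1].buffer = (binder_uintptr_t)(uintptr_t)b;
	obj[1].length = b_len;
	obj[1].parent = 0;		/* index of A in the offsets array */
	obj[1].parent_offset = 0;

	/* C: its pointer lives at offset 16 inside A; 16 > 0, so the
	 * fixups are in increasing order and the driver accepts them.
	 */
	obj[2].hdr.type = BINDER_TYPE_PTR;
	obj[2].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	obj[2].buffer = (binder_uintptr_t)(uintptr_t)c;
	obj[2].length = c_len;
	obj[2].parent = 0;
	obj[2].parent_offset = 16;
}
#endif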
1887
1888 static void binder_transaction_buffer_release(struct binder_proc *proc,
1889 struct binder_buffer *buffer,
1890 binder_size_t *failed_at)
1891 {
1892 binder_size_t *offp, *off_start, *off_end;
1893 int debug_id = buffer->debug_id;
1894
1895 binder_debug(BINDER_DEBUG_TRANSACTION,
1896 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1897 proc->pid, buffer->debug_id,
1898 buffer->data_size, buffer->offsets_size, failed_at);
1899
1900 if (buffer->target_node)
1901 binder_dec_node(buffer->target_node, 1, 0);
1902
1903 off_start = (binder_size_t *)(buffer->data +
1904 ALIGN(buffer->data_size, sizeof(void *)));
1905 if (failed_at)
1906 off_end = failed_at;
1907 else
1908 off_end = (void *)off_start + buffer->offsets_size;
1909 for (offp = off_start; offp < off_end; offp++) {
1910 struct binder_object_header *hdr;
1911 size_t object_size = binder_validate_object(buffer, *offp);
1912
1913 if (object_size == 0) {
1914 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1915 debug_id, (u64)*offp, buffer->data_size);
1916 continue;
1917 }
1918 hdr = (struct binder_object_header *)(buffer->data + *offp);
1919 switch (hdr->type) {
1920 case BINDER_TYPE_BINDER:
1921 case BINDER_TYPE_WEAK_BINDER: {
1922 struct flat_binder_object *fp;
1923 struct binder_node *node;
1924
1925 fp = to_flat_binder_object(hdr);
1926 node = binder_get_node(proc, fp->binder);
1927 if (node == NULL) {
1928 pr_err("transaction release %d bad node %016llx\n",
1929 debug_id, (u64)fp->binder);
1930 break;
1931 }
1932 binder_debug(BINDER_DEBUG_TRANSACTION,
1933 " node %d u%016llx\n",
1934 node->debug_id, (u64)node->ptr);
1935 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1936 0);
1937 binder_put_node(node);
1938 } break;
1939 case BINDER_TYPE_HANDLE:
1940 case BINDER_TYPE_WEAK_HANDLE: {
1941 struct flat_binder_object *fp;
1942 struct binder_ref_data rdata;
1943 int ret;
1944
1945 fp = to_flat_binder_object(hdr);
1946 ret = binder_dec_ref_for_handle(proc, fp->handle,
1947 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1948
1949 if (ret) {
1950 pr_err("transaction release %d bad handle %d, ret = %d\n",
1951 debug_id, fp->handle, ret);
1952 break;
1953 }
1954 binder_debug(BINDER_DEBUG_TRANSACTION,
1955 " ref %d desc %d\n",
1956 rdata.debug_id, rdata.desc);
1957 } break;
1958
1959 case BINDER_TYPE_FD: {
1960 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1961
1962 binder_debug(BINDER_DEBUG_TRANSACTION,
1963 " fd %d\n", fp->fd);
1964 if (failed_at)
1965 task_close_fd(proc, fp->fd);
1966 } break;
1967 case BINDER_TYPE_PTR:
1968 /*
1969 * Nothing to do here, this will get cleaned up when the
1970 * transaction buffer gets freed
1971 */
1972 break;
1973 case BINDER_TYPE_FDA: {
1974 struct binder_fd_array_object *fda;
1975 struct binder_buffer_object *parent;
1976 uintptr_t parent_buffer;
1977 u32 *fd_array;
1978 size_t fd_index;
1979 binder_size_t fd_buf_size;
1980
1981 fda = to_binder_fd_array_object(hdr);
1982 parent = binder_validate_ptr(buffer, fda->parent,
1983 off_start,
1984 offp - off_start);
1985 if (!parent) {
1986 pr_err("transaction release %d bad parent offset\n",
1987 debug_id);
1988 continue;
1989 }
1990 /*
1991 * Since the parent was already fixed up, convert it
1992 * back to kernel address space to access it
1993 */
1994 parent_buffer = parent->buffer -
1995 binder_alloc_get_user_buffer_offset(
1996 &proc->alloc);
1997
1998 fd_buf_size = sizeof(u32) * fda->num_fds;
1999 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2000 pr_err("transaction release %d invalid number of fds (%lld)\n",
2001 debug_id, (u64)fda->num_fds);
2002 continue;
2003 }
2004 if (fd_buf_size > parent->length ||
2005 fda->parent_offset > parent->length - fd_buf_size) {
2006 /* No space for all file descriptors here. */
2007 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2008 debug_id, (u64)fda->num_fds);
2009 continue;
2010 }
2011 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2012 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2013 task_close_fd(proc, fd_array[fd_index]);
2014 } break;
2015 default:
2016 pr_err("transaction release %d bad object type %x\n",
2017 debug_id, hdr->type);
2018 break;
2019 }
2020 }
2021 }
2022
2023 static int binder_translate_binder(struct flat_binder_object *fp,
2024 struct binder_transaction *t,
2025 struct binder_thread *thread)
2026 {
2027 struct binder_node *node;
2028 struct binder_proc *proc = thread->proc;
2029 struct binder_proc *target_proc = t->to_proc;
2030 struct binder_ref_data rdata;
2031 int ret = 0;
2032
2033 node = binder_get_node(proc, fp->binder);
2034 if (!node) {
2035 node = binder_new_node(proc, fp);
2036 if (!node)
2037 return -ENOMEM;
2038 }
2039 if (fp->cookie != node->cookie) {
2040 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2041 proc->pid, thread->pid, (u64)fp->binder,
2042 node->debug_id, (u64)fp->cookie,
2043 (u64)node->cookie);
2044 ret = -EINVAL;
2045 goto done;
2046 }
2047 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2048 ret = -EPERM;
2049 goto done;
2050 }
2051
2052 ret = binder_inc_ref_for_node(target_proc, node,
2053 fp->hdr.type == BINDER_TYPE_BINDER,
2054 &thread->todo, &rdata);
2055 if (ret)
2056 goto done;
2057
2058 if (fp->hdr.type == BINDER_TYPE_BINDER)
2059 fp->hdr.type = BINDER_TYPE_HANDLE;
2060 else
2061 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2062 fp->binder = 0;
2063 fp->handle = rdata.desc;
2064 fp->cookie = 0;
2065
2066 trace_binder_transaction_node_to_ref(t, node, &rdata);
2067 binder_debug(BINDER_DEBUG_TRANSACTION,
2068 " node %d u%016llx -> ref %d desc %d\n",
2069 node->debug_id, (u64)node->ptr,
2070 rdata.debug_id, rdata.desc);
2071 done:
2072 binder_put_node(node);
2073 return ret;
2074 }
2075
2076 static int binder_translate_handle(struct flat_binder_object *fp,
2077 struct binder_transaction *t,
2078 struct binder_thread *thread)
2079 {
2080 struct binder_proc *proc = thread->proc;
2081 struct binder_proc *target_proc = t->to_proc;
2082 struct binder_node *node;
2083 struct binder_ref_data src_rdata;
2084 int ret = 0;
2085
2086 node = binder_get_node_from_ref(proc, fp->handle,
2087 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2088 if (!node) {
2089 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2090 proc->pid, thread->pid, fp->handle);
2091 return -EINVAL;
2092 }
2093 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2094 ret = -EPERM;
2095 goto done;
2096 }
2097
2098 binder_node_lock(node);
2099 if (node->proc == target_proc) {
2100 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2101 fp->hdr.type = BINDER_TYPE_BINDER;
2102 else
2103 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2104 fp->binder = node->ptr;
2105 fp->cookie = node->cookie;
2106 if (node->proc)
2107 binder_inner_proc_lock(node->proc);
2108 binder_inc_node_nilocked(node,
2109 fp->hdr.type == BINDER_TYPE_BINDER,
2110 0, NULL);
2111 if (node->proc)
2112 binder_inner_proc_unlock(node->proc);
2113 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2114 binder_debug(BINDER_DEBUG_TRANSACTION,
2115 " ref %d desc %d -> node %d u%016llx\n",
2116 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2117 (u64)node->ptr);
2118 binder_node_unlock(node);
2119 } else {
2120 int ret;
2121 struct binder_ref_data dest_rdata;
2122
2123 binder_node_unlock(node);
2124 ret = binder_inc_ref_for_node(target_proc, node,
2125 fp->hdr.type == BINDER_TYPE_HANDLE,
2126 NULL, &dest_rdata);
2127 if (ret)
2128 goto done;
2129
2130 fp->binder = 0;
2131 fp->handle = dest_rdata.desc;
2132 fp->cookie = 0;
2133 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2134 &dest_rdata);
2135 binder_debug(BINDER_DEBUG_TRANSACTION,
2136 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2137 src_rdata.debug_id, src_rdata.desc,
2138 dest_rdata.debug_id, dest_rdata.desc,
2139 node->debug_id);
2140 }
2141 done:
2142 binder_put_node(node);
2143 return ret;
2144 }
2145
2146 static int binder_translate_fd(int fd,
2147 struct binder_transaction *t,
2148 struct binder_thread *thread,
2149 struct binder_transaction *in_reply_to)
2150 {
2151 struct binder_proc *proc = thread->proc;
2152 struct binder_proc *target_proc = t->to_proc;
2153 int target_fd;
2154 struct file *file;
2155 int ret;
2156 bool target_allows_fd;
2157
2158 if (in_reply_to)
2159 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2160 else
2161 target_allows_fd = t->buffer->target_node->accept_fds;
2162 if (!target_allows_fd) {
2163 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2164 proc->pid, thread->pid,
2165 in_reply_to ? "reply" : "transaction",
2166 fd);
2167 ret = -EPERM;
2168 goto err_fd_not_accepted;
2169 }
2170
2171 file = fget(fd);
2172 if (!file) {
2173 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2174 proc->pid, thread->pid, fd);
2175 ret = -EBADF;
2176 goto err_fget;
2177 }
2178 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2179 if (ret < 0) {
2180 ret = -EPERM;
2181 goto err_security;
2182 }
2183
2184 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2185 if (target_fd < 0) {
2186 ret = -ENOMEM;
2187 goto err_get_unused_fd;
2188 }
2189 task_fd_install(target_proc, target_fd, file);
2190 trace_binder_transaction_fd(t, fd, target_fd);
2191 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2192 fd, target_fd);
2193
2194 return target_fd;
2195
2196 err_get_unused_fd:
2197 err_security:
2198 fput(file);
2199 err_fget:
2200 err_fd_not_accepted:
2201 return ret;
2202 }
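/*
 * Editor's sketch: on the sending side, a file descriptor travels as a
 * struct binder_fd_object embedded in the transaction data, with its
 * offset recorded in the offsets array. The value the receiver sees is
 * the target_fd installed above, not the sender's descriptor number.
 * The helper below is invented for illustration.
 */
#if 0	/* illustration only -- not compiled into the driver */
#include <string.h>
#include <linux/android/binder.h>

static void fill_fd_object(struct binder_fd_object *obj, int fd)
{
	memset(obj, 0, sizeof(*obj));
	obj->hdr.type = BINDER_TYPE_FD;
	obj->fd = fd;	/* sender's fd; rewritten by the driver in the target */
}
#endif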
2203
2204 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2205 struct binder_buffer_object *parent,
2206 struct binder_transaction *t,
2207 struct binder_thread *thread,
2208 struct binder_transaction *in_reply_to)
2209 {
2210 binder_size_t fdi, fd_buf_size, num_installed_fds;
2211 int target_fd;
2212 uintptr_t parent_buffer;
2213 u32 *fd_array;
2214 struct binder_proc *proc = thread->proc;
2215 struct binder_proc *target_proc = t->to_proc;
2216
2217 fd_buf_size = sizeof(u32) * fda->num_fds;
2218 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2219 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2220 proc->pid, thread->pid, (u64)fda->num_fds);
2221 return -EINVAL;
2222 }
2223 if (fd_buf_size > parent->length ||
2224 fda->parent_offset > parent->length - fd_buf_size) {
2225 /* No space for all file descriptors here. */
2226 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2227 proc->pid, thread->pid, (u64)fda->num_fds);
2228 return -EINVAL;
2229 }
2230 /*
2231 * Since the parent was already fixed up, convert it
2232 * back to the kernel address space to access it
2233 */
2234 parent_buffer = parent->buffer -
2235 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2236 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2237 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2238 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2239 proc->pid, thread->pid);
2240 return -EINVAL;
2241 }
2242 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2243 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2244 in_reply_to);
2245 if (target_fd < 0)
2246 goto err_translate_fd_failed;
2247 fd_array[fdi] = target_fd;
2248 }
2249 return 0;
2250
2251 err_translate_fd_failed:
2252 /*
2253 * Failed to allocate fd or security error, free fds
2254 * installed so far.
2255 */
2256 num_installed_fds = fdi;
2257 for (fdi = 0; fdi < num_installed_fds; fdi++)
2258 task_close_fd(target_proc, fd_array[fdi]);
2259 return target_fd;
2260 }
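/*
 * Editor's sketch: a BINDER_TYPE_FDA object carries no descriptors
 * itself; it names a parent BINDER_TYPE_PTR buffer (by index into the
 * offsets array) plus an offset where a u32 fd array begins, and
 * binder_translate_fd_array() above rewrites that array in place with
 * target-side descriptors. The helper name below is invented.
 */
#if 0	/* illustration only -- not compiled into the driver */
#include <string.h>
#include <linux/android/binder.h>

static void fill_fd_array_object(struct binder_fd_array_object *fda,
				 binder_size_t parent_index,
				 binder_size_t offset_in_parent,
				 binder_size_t num_fds)
{
	memset(fda, 0, sizeof(*fda));
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->num_fds = num_fds;
	fda->parent = parent_index;		/* index of the parent PTR object */
	fda->parent_offset = offset_in_parent;	/* where the u32 fds start */
}
#endif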
2261
2262 static int binder_fixup_parent(struct binder_transaction *t,
2263 struct binder_thread *thread,
2264 struct binder_buffer_object *bp,
2265 binder_size_t *off_start,
2266 binder_size_t num_valid,
2267 struct binder_buffer_object *last_fixup_obj,
2268 binder_size_t last_fixup_min_off)
2269 {
2270 struct binder_buffer_object *parent;
2271 u8 *parent_buffer;
2272 struct binder_buffer *b = t->buffer;
2273 struct binder_proc *proc = thread->proc;
2274 struct binder_proc *target_proc = t->to_proc;
2275
2276 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2277 return 0;
2278
2279 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2280 if (!parent) {
2281 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2282 proc->pid, thread->pid);
2283 return -EINVAL;
2284 }
2285
2286 if (!binder_validate_fixup(b, off_start,
2287 parent, bp->parent_offset,
2288 last_fixup_obj,
2289 last_fixup_min_off)) {
2290 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2291 proc->pid, thread->pid);
2292 return -EINVAL;
2293 }
2294
2295 if (parent->length < sizeof(binder_uintptr_t) ||
2296 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2297 /* No space for a pointer here! */
2298 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2299 proc->pid, thread->pid);
2300 return -EINVAL;
2301 }
2302 parent_buffer = (u8 *)(parent->buffer -
2303 binder_alloc_get_user_buffer_offset(
2304 &target_proc->alloc));
2305 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2306
2307 return 0;
2308 }
2309
2310 static void binder_transaction(struct binder_proc *proc,
2311 struct binder_thread *thread,
2312 struct binder_transaction_data *tr, int reply,
2313 binder_size_t extra_buffers_size)
2314 {
2315 int ret;
2316 struct binder_transaction *t;
2317 struct binder_work *tcomplete;
2318 binder_size_t *offp, *off_end, *off_start;
2319 binder_size_t off_min;
2320 u8 *sg_bufp, *sg_buf_end;
2321 struct binder_proc *target_proc = NULL;
2322 struct binder_thread *target_thread = NULL;
2323 struct binder_node *target_node = NULL;
2324 struct list_head *target_list;
2325 wait_queue_head_t *target_wait;
2326 struct binder_transaction *in_reply_to = NULL;
2327 struct binder_transaction_log_entry *e;
2328 uint32_t return_error = 0;
2329 uint32_t return_error_param = 0;
2330 uint32_t return_error_line = 0;
2331 struct binder_buffer_object *last_fixup_obj = NULL;
2332 binder_size_t last_fixup_min_off = 0;
2333 struct binder_context *context = proc->context;
2334 int t_debug_id = atomic_inc_return(&binder_last_id);
2335
2336 e = binder_transaction_log_add(&binder_transaction_log);
2337 e->debug_id = t_debug_id;
2338 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2339 e->from_proc = proc->pid;
2340 e->from_thread = thread->pid;
2341 e->target_handle = tr->target.handle;
2342 e->data_size = tr->data_size;
2343 e->offsets_size = tr->offsets_size;
2344 e->context_name = proc->context->name;
2345
2346 if (reply) {
2347 in_reply_to = thread->transaction_stack;
2348 if (in_reply_to == NULL) {
2349 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2350 proc->pid, thread->pid);
2351 return_error = BR_FAILED_REPLY;
2352 return_error_param = -EPROTO;
2353 return_error_line = __LINE__;
2354 goto err_empty_call_stack;
2355 }
2356 binder_set_nice(in_reply_to->saved_priority);
2357 if (in_reply_to->to_thread != thread) {
2358 spin_lock(&in_reply_to->lock);
2359 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2360 proc->pid, thread->pid, in_reply_to->debug_id,
2361 in_reply_to->to_proc ?
2362 in_reply_to->to_proc->pid : 0,
2363 in_reply_to->to_thread ?
2364 in_reply_to->to_thread->pid : 0);
2365 spin_unlock(&in_reply_to->lock);
2366 return_error = BR_FAILED_REPLY;
2367 return_error_param = -EPROTO;
2368 return_error_line = __LINE__;
2369 in_reply_to = NULL;
2370 goto err_bad_call_stack;
2371 }
2372 thread->transaction_stack = in_reply_to->to_parent;
2373 target_thread = binder_get_txn_from(in_reply_to);
2374 if (target_thread == NULL) {
2375 return_error = BR_DEAD_REPLY;
2376 return_error_line = __LINE__;
2377 goto err_dead_binder;
2378 }
2379 if (target_thread->transaction_stack != in_reply_to) {
2380 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2381 proc->pid, thread->pid,
2382 target_thread->transaction_stack ?
2383 target_thread->transaction_stack->debug_id : 0,
2384 in_reply_to->debug_id);
2385 return_error = BR_FAILED_REPLY;
2386 return_error_param = -EPROTO;
2387 return_error_line = __LINE__;
2388 in_reply_to = NULL;
2389 target_thread = NULL;
2390 goto err_dead_binder;
2391 }
2392 target_proc = target_thread->proc;
2393 target_proc->tmp_ref++;
2394 } else {
2395 if (tr->target.handle) {
2396 struct binder_ref *ref;
2397
2398 /*
2399 * There must already be a strong ref
2400 * on this node. If so, do a strong
2401 * increment on the node to ensure it
2402 * stays alive until the transaction is
2403 * done.
2404 */
2405 ref = binder_get_ref(proc, tr->target.handle, true);
2406 if (ref) {
2407 binder_inc_node(ref->node, 1, 0, NULL);
2408 target_node = ref->node;
2409 }
2410 if (target_node == NULL) {
2411 binder_user_error("%d:%d got transaction to invalid handle\n",
2412 proc->pid, thread->pid);
2413 return_error = BR_FAILED_REPLY;
2414 return_error_param = -EINVAL;
2415 return_error_line = __LINE__;
2416 goto err_invalid_target_handle;
2417 }
2418 } else {
2419 mutex_lock(&context->context_mgr_node_lock);
2420 target_node = context->binder_context_mgr_node;
2421 if (target_node == NULL) {
2422 return_error = BR_DEAD_REPLY;
2423 mutex_unlock(&context->context_mgr_node_lock);
2424 return_error_line = __LINE__;
2425 goto err_no_context_mgr_node;
2426 }
2427 binder_inc_node(target_node, 1, 0, NULL);
2428 mutex_unlock(&context->context_mgr_node_lock);
2429 }
2430 e->to_node = target_node->debug_id;
2431 binder_node_lock(target_node);
2432 target_proc = target_node->proc;
2433 if (target_proc == NULL) {
2434 binder_node_unlock(target_node);
2435 return_error = BR_DEAD_REPLY;
2436 return_error_line = __LINE__;
2437 goto err_dead_binder;
2438 }
2439 target_proc->tmp_ref++;
2440 binder_node_unlock(target_node);
2441 if (security_binder_transaction(proc->tsk,
2442 target_proc->tsk) < 0) {
2443 return_error = BR_FAILED_REPLY;
2444 return_error_param = -EPERM;
2445 return_error_line = __LINE__;
2446 goto err_invalid_target_handle;
2447 }
2448 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2449 struct binder_transaction *tmp;
2450
2451 tmp = thread->transaction_stack;
2452 if (tmp->to_thread != thread) {
2453 spin_lock(&tmp->lock);
2454 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2455 proc->pid, thread->pid, tmp->debug_id,
2456 tmp->to_proc ? tmp->to_proc->pid : 0,
2457 tmp->to_thread ?
2458 tmp->to_thread->pid : 0);
2459 spin_unlock(&tmp->lock);
2460 return_error = BR_FAILED_REPLY;
2461 return_error_param = -EPROTO;
2462 return_error_line = __LINE__;
2463 goto err_bad_call_stack;
2464 }
2465 while (tmp) {
2466 struct binder_thread *from;
2467
2468 spin_lock(&tmp->lock);
2469 from = tmp->from;
2470 if (from && from->proc == target_proc) {
2471 atomic_inc(&from->tmp_ref);
2472 target_thread = from;
2473 spin_unlock(&tmp->lock);
2474 break;
2475 }
2476 spin_unlock(&tmp->lock);
2477 tmp = tmp->from_parent;
2478 }
2479 }
2480 }
2481 if (target_thread) {
2482 e->to_thread = target_thread->pid;
2483 target_list = &target_thread->todo;
2484 target_wait = &target_thread->wait;
2485 } else {
2486 target_list = &target_proc->todo;
2487 target_wait = &target_proc->wait;
2488 }
2489 e->to_proc = target_proc->pid;
2490
2491 /* TODO: reuse incoming transaction for reply */
2492 t = kzalloc(sizeof(*t), GFP_KERNEL);
2493 if (t == NULL) {
2494 return_error = BR_FAILED_REPLY;
2495 return_error_param = -ENOMEM;
2496 return_error_line = __LINE__;
2497 goto err_alloc_t_failed;
2498 }
2499 binder_stats_created(BINDER_STAT_TRANSACTION);
2500 spin_lock_init(&t->lock);
2501
2502 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2503 if (tcomplete == NULL) {
2504 return_error = BR_FAILED_REPLY;
2505 return_error_param = -ENOMEM;
2506 return_error_line = __LINE__;
2507 goto err_alloc_tcomplete_failed;
2508 }
2509 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2510
2511 t->debug_id = t_debug_id;
2512
2513 if (reply)
2514 binder_debug(BINDER_DEBUG_TRANSACTION,
2515 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2516 proc->pid, thread->pid, t->debug_id,
2517 target_proc->pid, target_thread->pid,
2518 (u64)tr->data.ptr.buffer,
2519 (u64)tr->data.ptr.offsets,
2520 (u64)tr->data_size, (u64)tr->offsets_size,
2521 (u64)extra_buffers_size);
2522 else
2523 binder_debug(BINDER_DEBUG_TRANSACTION,
2524 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2525 proc->pid, thread->pid, t->debug_id,
2526 target_proc->pid, target_node->debug_id,
2527 (u64)tr->data.ptr.buffer,
2528 (u64)tr->data.ptr.offsets,
2529 (u64)tr->data_size, (u64)tr->offsets_size,
2530 (u64)extra_buffers_size);
2531
2532 if (!reply && !(tr->flags & TF_ONE_WAY))
2533 t->from = thread;
2534 else
2535 t->from = NULL;
2536 t->sender_euid = task_euid(proc->tsk);
2537 t->to_proc = target_proc;
2538 t->to_thread = target_thread;
2539 t->code = tr->code;
2540 t->flags = tr->flags;
2541 t->priority = task_nice(current);
2542
2543 trace_binder_transaction(reply, t, target_node);
2544
2545 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2546 tr->offsets_size, extra_buffers_size,
2547 !reply && (t->flags & TF_ONE_WAY));
2548 if (IS_ERR(t->buffer)) {
2549 /*
2550 * -ESRCH indicates VMA cleared. The target is dying.
2551 */
2552 return_error_param = PTR_ERR(t->buffer);
2553 return_error = return_error_param == -ESRCH ?
2554 BR_DEAD_REPLY : BR_FAILED_REPLY;
2555 return_error_line = __LINE__;
2556 t->buffer = NULL;
2557 goto err_binder_alloc_buf_failed;
2558 }
2559 t->buffer->allow_user_free = 0;
2560 t->buffer->debug_id = t->debug_id;
2561 t->buffer->transaction = t;
2562 t->buffer->target_node = target_node;
2563 trace_binder_transaction_alloc_buf(t->buffer);
2564 off_start = (binder_size_t *)(t->buffer->data +
2565 ALIGN(tr->data_size, sizeof(void *)));
2566 offp = off_start;
2567
2568 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2569 tr->data.ptr.buffer, tr->data_size)) {
2570 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2571 proc->pid, thread->pid);
2572 return_error = BR_FAILED_REPLY;
2573 return_error_param = -EFAULT;
2574 return_error_line = __LINE__;
2575 goto err_copy_data_failed;
2576 }
2577 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2578 tr->data.ptr.offsets, tr->offsets_size)) {
2579 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2580 proc->pid, thread->pid);
2581 return_error = BR_FAILED_REPLY;
2582 return_error_param = -EFAULT;
2583 return_error_line = __LINE__;
2584 goto err_copy_data_failed;
2585 }
2586 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2587 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2588 proc->pid, thread->pid, (u64)tr->offsets_size);
2589 return_error = BR_FAILED_REPLY;
2590 return_error_param = -EINVAL;
2591 return_error_line = __LINE__;
2592 goto err_bad_offset;
2593 }
2594 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2595 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2596 proc->pid, thread->pid,
2597 (u64)extra_buffers_size);
2598 return_error = BR_FAILED_REPLY;
2599 return_error_param = -EINVAL;
2600 return_error_line = __LINE__;
2601 goto err_bad_offset;
2602 }
2603 off_end = (void *)off_start + tr->offsets_size;
2604 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2605 sg_buf_end = sg_bufp + extra_buffers_size;
2606 off_min = 0;
2607 for (; offp < off_end; offp++) {
2608 struct binder_object_header *hdr;
2609 size_t object_size = binder_validate_object(t->buffer, *offp);
2610
2611 if (object_size == 0 || *offp < off_min) {
2612 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2613 proc->pid, thread->pid, (u64)*offp,
2614 (u64)off_min,
2615 (u64)t->buffer->data_size);
2616 return_error = BR_FAILED_REPLY;
2617 return_error_param = -EINVAL;
2618 return_error_line = __LINE__;
2619 goto err_bad_offset;
2620 }
2621
2622 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2623 off_min = *offp + object_size;
2624 switch (hdr->type) {
2625 case BINDER_TYPE_BINDER:
2626 case BINDER_TYPE_WEAK_BINDER: {
2627 struct flat_binder_object *fp;
2628
2629 fp = to_flat_binder_object(hdr);
2630 ret = binder_translate_binder(fp, t, thread);
2631 if (ret < 0) {
2632 return_error = BR_FAILED_REPLY;
2633 return_error_param = ret;
2634 return_error_line = __LINE__;
2635 goto err_translate_failed;
2636 }
2637 } break;
2638 case BINDER_TYPE_HANDLE:
2639 case BINDER_TYPE_WEAK_HANDLE: {
2640 struct flat_binder_object *fp;
2641
2642 fp = to_flat_binder_object(hdr);
2643 ret = binder_translate_handle(fp, t, thread);
2644 if (ret < 0) {
2645 return_error = BR_FAILED_REPLY;
2646 return_error_param = ret;
2647 return_error_line = __LINE__;
2648 goto err_translate_failed;
2649 }
2650 } break;
2651
2652 case BINDER_TYPE_FD: {
2653 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2654 int target_fd = binder_translate_fd(fp->fd, t, thread,
2655 in_reply_to);
2656
2657 if (target_fd < 0) {
2658 return_error = BR_FAILED_REPLY;
2659 return_error_param = target_fd;
2660 return_error_line = __LINE__;
2661 goto err_translate_failed;
2662 }
2663 fp->pad_binder = 0;
2664 fp->fd = target_fd;
2665 } break;
2666 case BINDER_TYPE_FDA: {
2667 struct binder_fd_array_object *fda =
2668 to_binder_fd_array_object(hdr);
2669 struct binder_buffer_object *parent =
2670 binder_validate_ptr(t->buffer, fda->parent,
2671 off_start,
2672 offp - off_start);
2673 if (!parent) {
2674 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2675 proc->pid, thread->pid);
2676 return_error = BR_FAILED_REPLY;
2677 return_error_param = -EINVAL;
2678 return_error_line = __LINE__;
2679 goto err_bad_parent;
2680 }
2681 if (!binder_validate_fixup(t->buffer, off_start,
2682 parent, fda->parent_offset,
2683 last_fixup_obj,
2684 last_fixup_min_off)) {
2685 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2686 proc->pid, thread->pid);
2687 return_error = BR_FAILED_REPLY;
2688 return_error_param = -EINVAL;
2689 return_error_line = __LINE__;
2690 goto err_bad_parent;
2691 }
2692 ret = binder_translate_fd_array(fda, parent, t, thread,
2693 in_reply_to);
2694 if (ret < 0) {
2695 return_error = BR_FAILED_REPLY;
2696 return_error_param = ret;
2697 return_error_line = __LINE__;
2698 goto err_translate_failed;
2699 }
2700 last_fixup_obj = parent;
2701 last_fixup_min_off =
2702 fda->parent_offset + sizeof(u32) * fda->num_fds;
2703 } break;
2704 case BINDER_TYPE_PTR: {
2705 struct binder_buffer_object *bp =
2706 to_binder_buffer_object(hdr);
2707 size_t buf_left = sg_buf_end - sg_bufp;
2708
2709 if (bp->length > buf_left) {
2710 binder_user_error("%d:%d got transaction with too large buffer\n",
2711 proc->pid, thread->pid);
2712 return_error = BR_FAILED_REPLY;
2713 return_error_param = -EINVAL;
2714 return_error_line = __LINE__;
2715 goto err_bad_offset;
2716 }
2717 if (copy_from_user(sg_bufp,
2718 (const void __user *)(uintptr_t)
2719 bp->buffer, bp->length)) {
2720 binder_user_error("%d:%d got transaction with invalid extra buffer ptr\n",
2721 proc->pid, thread->pid);
2722 return_error_param = -EFAULT;
2723 return_error = BR_FAILED_REPLY;
2724 return_error_line = __LINE__;
2725 goto err_copy_data_failed;
2726 }
2727 /* Fixup buffer pointer to target proc address space */
2728 bp->buffer = (uintptr_t)sg_bufp +
2729 binder_alloc_get_user_buffer_offset(
2730 &target_proc->alloc);
2731 sg_bufp += ALIGN(bp->length, sizeof(u64));
2732
2733 ret = binder_fixup_parent(t, thread, bp, off_start,
2734 offp - off_start,
2735 last_fixup_obj,
2736 last_fixup_min_off);
2737 if (ret < 0) {
2738 return_error = BR_FAILED_REPLY;
2739 return_error_param = ret;
2740 return_error_line = __LINE__;
2741 goto err_translate_failed;
2742 }
2743 last_fixup_obj = bp;
2744 last_fixup_min_off = 0;
2745 } break;
2746 default:
2747 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2748 proc->pid, thread->pid, hdr->type);
2749 return_error = BR_FAILED_REPLY;
2750 return_error_param = -EINVAL;
2751 return_error_line = __LINE__;
2752 goto err_bad_object_type;
2753 }
2754 }
2755 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2756 binder_enqueue_work(proc, tcomplete, &thread->todo);
2757 t->work.type = BINDER_WORK_TRANSACTION;
2758
2759 if (reply) {
2760 if (target_thread->is_dead)
2761 goto err_dead_proc_or_thread;
2762 BUG_ON(t->buffer->async_transaction != 0);
2763 binder_pop_transaction(target_thread, in_reply_to);
2764 binder_free_transaction(in_reply_to);
2765 binder_enqueue_work(target_proc, &t->work, target_list);
2766 } else if (!(t->flags & TF_ONE_WAY)) {
2767 BUG_ON(t->buffer->async_transaction != 0);
2768 t->need_reply = 1;
2769 t->from_parent = thread->transaction_stack;
2770 thread->transaction_stack = t;
2771 if (target_proc->is_dead ||
2772 (target_thread && target_thread->is_dead)) {
2773 binder_pop_transaction(thread, t);
2774 goto err_dead_proc_or_thread;
2775 }
2776 binder_enqueue_work(target_proc, &t->work, target_list);
2777 } else {
2778 BUG_ON(target_node == NULL);
2779 BUG_ON(t->buffer->async_transaction != 1);
2780 binder_node_lock(target_node);
2781 if (target_node->has_async_transaction) {
2782 target_list = &target_node->async_todo;
2783 target_wait = NULL;
2784 } else
2785 target_node->has_async_transaction = 1;
2786 /*
2787 * Test/set of has_async_transaction
2788 * must be atomic with enqueue on
2789 * async_todo
2790 */
2791 if (target_proc->is_dead ||
2792 (target_thread && target_thread->is_dead)) {
2793 binder_node_unlock(target_node);
2794 goto err_dead_proc_or_thread;
2795 }
2796 binder_enqueue_work(target_proc, &t->work, target_list);
2797 binder_node_unlock(target_node);
2798 }
2799 if (target_wait) {
2800 if (reply || !(tr->flags & TF_ONE_WAY))
2801 wake_up_interruptible_sync(target_wait);
2802 else
2803 wake_up_interruptible(target_wait);
2804 }
2805 if (target_thread)
2806 binder_thread_dec_tmpref(target_thread);
2807 binder_proc_dec_tmpref(target_proc);
2808 /*
2809 * write barrier to synchronize with initialization
2810 * of log entry
2811 */
2812 smp_wmb();
2813 WRITE_ONCE(e->debug_id_done, t_debug_id);
2814 return;
2815
2816 err_dead_proc_or_thread:
2817 return_error = BR_DEAD_REPLY;
2818 return_error_line = __LINE__;
2819 err_translate_failed:
2820 err_bad_object_type:
2821 err_bad_offset:
2822 err_bad_parent:
2823 err_copy_data_failed:
2824 trace_binder_transaction_failed_buffer_release(t->buffer);
2825 binder_transaction_buffer_release(target_proc, t->buffer, offp);
2826 target_node = NULL;
2827 t->buffer->transaction = NULL;
2828 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
2829 err_binder_alloc_buf_failed:
2830 kfree(tcomplete);
2831 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2832 err_alloc_tcomplete_failed:
2833 kfree(t);
2834 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2835 err_alloc_t_failed:
2836 err_bad_call_stack:
2837 err_empty_call_stack:
2838 err_dead_binder:
2839 err_invalid_target_handle:
2840 err_no_context_mgr_node:
2841 if (target_thread)
2842 binder_thread_dec_tmpref(target_thread);
2843 if (target_proc)
2844 binder_proc_dec_tmpref(target_proc);
2845 if (target_node)
2846 binder_dec_node(target_node, 1, 0);
2847
2848 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2849 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2850 proc->pid, thread->pid, return_error, return_error_param,
2851 (u64)tr->data_size, (u64)tr->offsets_size,
2852 return_error_line);
2853
2854 {
2855 struct binder_transaction_log_entry *fe;
2856
2857 e->return_error = return_error;
2858 e->return_error_param = return_error_param;
2859 e->return_error_line = return_error_line;
2860 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2861 *fe = *e;
2862 /*
2863 * write barrier to synchronize with initialization
2864 * of log entry
2865 */
2866 smp_wmb();
2867 WRITE_ONCE(e->debug_id_done, t_debug_id);
2868 WRITE_ONCE(fe->debug_id_done, t_debug_id);
2869 }
2870
2871 BUG_ON(thread->return_error.cmd != BR_OK);
2872 if (in_reply_to) {
2873 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
2874 binder_enqueue_work(thread->proc,
2875 &thread->return_error.work,
2876 &thread->todo);
2877 binder_send_failed_reply(in_reply_to, return_error);
2878 } else {
2879 thread->return_error.cmd = return_error;
2880 binder_enqueue_work(thread->proc,
2881 &thread->return_error.work,
2882 &thread->todo);
2883 }
2884 }
2885
2886 static int binder_thread_write(struct binder_proc *proc,
2887 struct binder_thread *thread,
2888 binder_uintptr_t binder_buffer, size_t size,
2889 binder_size_t *consumed)
2890 {
2891 uint32_t cmd;
2892 struct binder_context *context = proc->context;
2893 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2894 void __user *ptr = buffer + *consumed;
2895 void __user *end = buffer + size;
2896
2897 while (ptr < end && thread->return_error.cmd == BR_OK) {
2898 int ret;
2899
2900 if (get_user(cmd, (uint32_t __user *)ptr))
2901 return -EFAULT;
2902 ptr += sizeof(uint32_t);
2903 trace_binder_command(cmd);
2904 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
2905 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2906 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2907 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
2908 }
2909 switch (cmd) {
2910 case BC_INCREFS:
2911 case BC_ACQUIRE:
2912 case BC_RELEASE:
2913 case BC_DECREFS: {
2914 uint32_t target;
2915 const char *debug_string;
2916 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
2917 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
2918 struct binder_ref_data rdata;
2919
2920 if (get_user(target, (uint32_t __user *)ptr))
2921 return -EFAULT;
2922
2923 ptr += sizeof(uint32_t);
2924 ret = -1;
2925 if (increment && !target) {
2926 struct binder_node *ctx_mgr_node;
2927 mutex_lock(&context->context_mgr_node_lock);
2928 ctx_mgr_node = context->binder_context_mgr_node;
2929 if (ctx_mgr_node)
2930 ret = binder_inc_ref_for_node(
2931 proc, ctx_mgr_node,
2932 strong, NULL, &rdata);
2933 mutex_unlock(&context->context_mgr_node_lock);
2934 }
2935 if (ret)
2936 ret = binder_update_ref_for_handle(
2937 proc, target, increment, strong,
2938 &rdata);
2939 if (!ret && rdata.desc != target) {
2940 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
2941 proc->pid, thread->pid,
2942 target, rdata.desc);
2943 }
2944 switch (cmd) {
2945 case BC_INCREFS:
2946 debug_string = "IncRefs";
2947 break;
2948 case BC_ACQUIRE:
2949 debug_string = "Acquire";
2950 break;
2951 case BC_RELEASE:
2952 debug_string = "Release";
2953 break;
2954 case BC_DECREFS:
2955 default:
2956 debug_string = "DecRefs";
2957 break;
2958 }
2959 if (ret) {
2960 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
2961 proc->pid, thread->pid, debug_string,
2962 strong, target, ret);
2963 break;
2964 }
2965 binder_debug(BINDER_DEBUG_USER_REFS,
2966 "%d:%d %s ref %d desc %d s %d w %d\n",
2967 proc->pid, thread->pid, debug_string,
2968 rdata.debug_id, rdata.desc, rdata.strong,
2969 rdata.weak);
2970 break;
2971 }
2972 case BC_INCREFS_DONE:
2973 case BC_ACQUIRE_DONE: {
2974 binder_uintptr_t node_ptr;
2975 binder_uintptr_t cookie;
2976 struct binder_node *node;
2977 bool free_node;
2978
2979 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2980 return -EFAULT;
2981 ptr += sizeof(binder_uintptr_t);
2982 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2983 return -EFAULT;
2984 ptr += sizeof(binder_uintptr_t);
2985 node = binder_get_node(proc, node_ptr);
2986 if (node == NULL) {
2987 binder_user_error("%d:%d %s u%016llx no match\n",
2988 proc->pid, thread->pid,
2989 cmd == BC_INCREFS_DONE ?
2990 "BC_INCREFS_DONE" :
2991 "BC_ACQUIRE_DONE",
2992 (u64)node_ptr);
2993 break;
2994 }
2995 if (cookie != node->cookie) {
2996 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2997 proc->pid, thread->pid,
2998 cmd == BC_INCREFS_DONE ?
2999 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3000 (u64)node_ptr, node->debug_id,
3001 (u64)cookie, (u64)node->cookie);
3002 binder_put_node(node);
3003 break;
3004 }
3005 binder_node_inner_lock(node);
3006 if (cmd == BC_ACQUIRE_DONE) {
3007 if (node->pending_strong_ref == 0) {
3008 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3009 proc->pid, thread->pid,
3010 node->debug_id);
3011 binder_node_inner_unlock(node);
3012 binder_put_node(node);
3013 break;
3014 }
3015 node->pending_strong_ref = 0;
3016 } else {
3017 if (node->pending_weak_ref == 0) {
3018 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3019 proc->pid, thread->pid,
3020 node->debug_id);
3021 binder_node_inner_unlock(node);
3022 binder_put_node(node);
3023 break;
3024 }
3025 node->pending_weak_ref = 0;
3026 }
3027 free_node = binder_dec_node_nilocked(node,
3028 cmd == BC_ACQUIRE_DONE, 0);
3029 WARN_ON(free_node);
3030 binder_debug(BINDER_DEBUG_USER_REFS,
3031 "%d:%d %s node %d ls %d lw %d tr %d\n",
3032 proc->pid, thread->pid,
3033 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3034 node->debug_id, node->local_strong_refs,
3035 node->local_weak_refs, node->tmp_refs);
3036 binder_node_inner_unlock(node);
3037 binder_put_node(node);
3038 break;
3039 }
3040 case BC_ATTEMPT_ACQUIRE:
3041 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3042 return -EINVAL;
3043 case BC_ACQUIRE_RESULT:
3044 pr_err("BC_ACQUIRE_RESULT not supported\n");
3045 return -EINVAL;
3046
3047 case BC_FREE_BUFFER: {
3048 binder_uintptr_t data_ptr;
3049 struct binder_buffer *buffer;
3050
3051 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3052 return -EFAULT;
3053 ptr += sizeof(binder_uintptr_t);
3054
3055 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3056 data_ptr);
3057 if (buffer == NULL) {
3058 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3059 proc->pid, thread->pid, (u64)data_ptr);
3060 break;
3061 }
3062 if (!buffer->allow_user_free) {
3063 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3064 proc->pid, thread->pid, (u64)data_ptr);
3065 break;
3066 }
3067 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3068 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3069 proc->pid, thread->pid, (u64)data_ptr,
3070 buffer->debug_id,
3071 buffer->transaction ? "active" : "finished");
3072
3073 if (buffer->transaction) {
3074 buffer->transaction->buffer = NULL;
3075 buffer->transaction = NULL;
3076 }
3077 if (buffer->async_transaction && buffer->target_node) {
3078 struct binder_node *buf_node;
3079 struct binder_work *w;
3080
3081 buf_node = buffer->target_node;
3082 binder_node_inner_lock(buf_node);
3083 BUG_ON(!buf_node->has_async_transaction);
3084 BUG_ON(buf_node->proc != proc);
3085 w = binder_dequeue_work_head_ilocked(
3086 &buf_node->async_todo);
3087 if (!w)
3088 buf_node->has_async_transaction = 0;
3089 else
3090 binder_enqueue_work_ilocked(
3091 w, &thread->todo);
3092 binder_node_inner_unlock(buf_node);
3093 }
3094 trace_binder_transaction_buffer_release(buffer);
3095 binder_transaction_buffer_release(proc, buffer, NULL);
3096 binder_alloc_free_buf(&proc->alloc, buffer);
3097 break;
3098 }
3099
3100 case BC_TRANSACTION_SG:
3101 case BC_REPLY_SG: {
3102 struct binder_transaction_data_sg tr;
3103
3104 if (copy_from_user(&tr, ptr, sizeof(tr)))
3105 return -EFAULT;
3106 ptr += sizeof(tr);
3107 binder_transaction(proc, thread, &tr.transaction_data,
3108 cmd == BC_REPLY_SG, tr.buffers_size);
3109 break;
3110 }
3111 case BC_TRANSACTION:
3112 case BC_REPLY: {
3113 struct binder_transaction_data tr;
3114
3115 if (copy_from_user(&tr, ptr, sizeof(tr)))
3116 return -EFAULT;
3117 ptr += sizeof(tr);
3118 binder_transaction(proc, thread, &tr,
3119 cmd == BC_REPLY, 0);
3120 break;
3121 }
3122
3123 case BC_REGISTER_LOOPER:
3124 binder_debug(BINDER_DEBUG_THREADS,
3125 "%d:%d BC_REGISTER_LOOPER\n",
3126 proc->pid, thread->pid);
3127 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3128 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3129 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3130 proc->pid, thread->pid);
3131 } else if (proc->requested_threads == 0) {
3132 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3133 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3134 proc->pid, thread->pid);
3135 } else {
3136 proc->requested_threads--;
3137 proc->requested_threads_started++;
3138 }
3139 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3140 break;
3141 case BC_ENTER_LOOPER:
3142 binder_debug(BINDER_DEBUG_THREADS,
3143 "%d:%d BC_ENTER_LOOPER\n",
3144 proc->pid, thread->pid);
3145 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3146 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3147 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3148 proc->pid, thread->pid);
3149 }
3150 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3151 break;
3152 case BC_EXIT_LOOPER:
3153 binder_debug(BINDER_DEBUG_THREADS,
3154 "%d:%d BC_EXIT_LOOPER\n",
3155 proc->pid, thread->pid);
3156 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3157 break;
3158
3159 case BC_REQUEST_DEATH_NOTIFICATION:
3160 case BC_CLEAR_DEATH_NOTIFICATION: {
3161 uint32_t target;
3162 binder_uintptr_t cookie;
3163 struct binder_ref *ref;
3164 struct binder_ref_death *death;
3165
3166 if (get_user(target, (uint32_t __user *)ptr))
3167 return -EFAULT;
3168 ptr += sizeof(uint32_t);
3169 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3170 return -EFAULT;
3171 ptr += sizeof(binder_uintptr_t);
3172 ref = binder_get_ref(proc, target, false);
3173 if (ref == NULL) {
3174 binder_user_error("%d:%d %s invalid ref %d\n",
3175 proc->pid, thread->pid,
3176 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3177 "BC_REQUEST_DEATH_NOTIFICATION" :
3178 "BC_CLEAR_DEATH_NOTIFICATION",
3179 target);
3180 break;
3181 }
3182
3183 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3184 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3185 proc->pid, thread->pid,
3186 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3187 "BC_REQUEST_DEATH_NOTIFICATION" :
3188 "BC_CLEAR_DEATH_NOTIFICATION",
3189 (u64)cookie, ref->data.debug_id,
3190 ref->data.desc, ref->data.strong,
3191 ref->data.weak, ref->node->debug_id);
3192
3193 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3194 if (ref->death) {
3195 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3196 proc->pid, thread->pid);
3197 break;
3198 }
3199 death = kzalloc(sizeof(*death), GFP_KERNEL);
3200 if (death == NULL) {
3201 WARN_ON(thread->return_error.cmd !=
3202 BR_OK);
3203 thread->return_error.cmd = BR_ERROR;
3204 binder_enqueue_work(
3205 thread->proc,
3206 &thread->return_error.work,
3207 &thread->todo);
3208 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3209 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3210 proc->pid, thread->pid);
3211 break;
3212 }
3213 binder_stats_created(BINDER_STAT_DEATH);
3214 INIT_LIST_HEAD(&death->work.entry);
3215 death->cookie = cookie;
3216 ref->death = death;
3217 binder_node_lock(ref->node);
3218 if (ref->node->proc == NULL) {
3219 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3220 if (thread->looper &
3221 (BINDER_LOOPER_STATE_REGISTERED |
3222 BINDER_LOOPER_STATE_ENTERED))
3223 binder_enqueue_work(
3224 proc,
3225 &ref->death->work,
3226 &thread->todo);
3227 else {
3228 binder_enqueue_work(
3229 proc,
3230 &ref->death->work,
3231 &proc->todo);
3232 wake_up_interruptible(
3233 &proc->wait);
3234 }
3235 }
3236 binder_node_unlock(ref->node);
3237 } else {
3238 binder_node_lock(ref->node);
3239 if (ref->death == NULL) {
3240 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3241 proc->pid, thread->pid);
3242 binder_node_unlock(ref->node);
3243 break;
3244 }
3245 death = ref->death;
3246 if (death->cookie != cookie) {
3247 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3248 proc->pid, thread->pid,
3249 (u64)death->cookie,
3250 (u64)cookie);
3251 binder_node_unlock(ref->node);
3252 break;
3253 }
3254 ref->death = NULL;
3255 binder_inner_proc_lock(proc);
3256 if (list_empty(&death->work.entry)) {
3257 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3258 if (thread->looper &
3259 (BINDER_LOOPER_STATE_REGISTERED |
3260 BINDER_LOOPER_STATE_ENTERED))
3261 binder_enqueue_work_ilocked(
3262 &death->work,
3263 &thread->todo);
3264 else {
3265 binder_enqueue_work_ilocked(
3266 &death->work,
3267 &proc->todo);
3268 wake_up_interruptible(
3269 &proc->wait);
3270 }
3271 } else {
3272 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3273 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3274 }
3275 binder_inner_proc_unlock(proc);
3276 binder_node_unlock(ref->node);
3277 }
3278 } break;
3279 case BC_DEAD_BINDER_DONE: {
3280 struct binder_work *w;
3281 binder_uintptr_t cookie;
3282 struct binder_ref_death *death = NULL;
3283
3284 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3285 return -EFAULT;
3286
3287 ptr += sizeof(cookie);
3288 binder_inner_proc_lock(proc);
3289 list_for_each_entry(w, &proc->delivered_death,
3290 entry) {
3291 struct binder_ref_death *tmp_death =
3292 container_of(w,
3293 struct binder_ref_death,
3294 work);
3295
3296 if (tmp_death->cookie == cookie) {
3297 death = tmp_death;
3298 break;
3299 }
3300 }
3301 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3302 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3303 proc->pid, thread->pid, (u64)cookie,
3304 death);
3305 if (death == NULL) {
3306 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3307 proc->pid, thread->pid, (u64)cookie);
3308 binder_inner_proc_unlock(proc);
3309 break;
3310 }
3311 binder_dequeue_work_ilocked(&death->work);
3312 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3313 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3314 if (thread->looper &
3315 (BINDER_LOOPER_STATE_REGISTERED |
3316 BINDER_LOOPER_STATE_ENTERED))
3317 binder_enqueue_work_ilocked(
3318 &death->work, &thread->todo);
3319 else {
3320 binder_enqueue_work_ilocked(
3321 &death->work,
3322 &proc->todo);
3323 wake_up_interruptible(&proc->wait);
3324 }
3325 }
3326 binder_inner_proc_unlock(proc);
3327 } break;
3328
3329 default:
3330 pr_err("%d:%d unknown command %d\n",
3331 proc->pid, thread->pid, cmd);
3332 return -EINVAL;
3333 }
3334 *consumed = ptr - buffer;
3335 }
3336 return 0;
3337 }
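/*
 * Editor's sketch: the BC_* stream parsed above reaches the driver via
 * the BINDER_WRITE_READ ioctl. A minimal userspace caller fills in the
 * write side of struct binder_write_read and leaves the read side to
 * be drained by binder_thread_read(). Illustration only; error handling
 * and device setup are simplified.
 */
#if 0	/* illustration only -- not compiled into the driver */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int enter_looper(int binder_fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;	/* command with no payload */
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd;
	bwr.write_size = sizeof(cmd);

	/* consumed by binder_thread_write() on this thread's behalf */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif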
3338
3339 static void binder_stat_br(struct binder_proc *proc,
3340 struct binder_thread *thread, uint32_t cmd)
3341 {
3342 trace_binder_return(cmd);
3343 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3344 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3345 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3346 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3347 }
3348 }
3349
3350 static int binder_has_proc_work(struct binder_proc *proc,
3351 struct binder_thread *thread)
3352 {
3353 return !binder_worklist_empty(proc, &proc->todo) ||
3354 thread->looper_need_return;
3355 }
3356
3357 static int binder_has_thread_work(struct binder_thread *thread)
3358 {
3359 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3360 thread->looper_need_return;
3361 }
3362
3363 static int binder_put_node_cmd(struct binder_proc *proc,
3364 struct binder_thread *thread,
3365 void __user **ptrp,
3366 binder_uintptr_t node_ptr,
3367 binder_uintptr_t node_cookie,
3368 int node_debug_id,
3369 uint32_t cmd, const char *cmd_name)
3370 {
3371 void __user *ptr = *ptrp;
3372
3373 if (put_user(cmd, (uint32_t __user *)ptr))
3374 return -EFAULT;
3375 ptr += sizeof(uint32_t);
3376
3377 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3378 return -EFAULT;
3379 ptr += sizeof(binder_uintptr_t);
3380
3381 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3382 return -EFAULT;
3383 ptr += sizeof(binder_uintptr_t);
3384
3385 binder_stat_br(proc, thread, cmd);
3386 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3387 proc->pid, thread->pid, cmd_name, node_debug_id,
3388 (u64)node_ptr, (u64)node_cookie);
3389
3390 *ptrp = ptr;
3391 return 0;
3392 }
3393
3394 static int binder_thread_read(struct binder_proc *proc,
3395 struct binder_thread *thread,
3396 binder_uintptr_t binder_buffer, size_t size,
3397 binder_size_t *consumed, int non_block)
3398 {
3399 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3400 void __user *ptr = buffer + *consumed;
3401 void __user *end = buffer + size;
3402
3403 int ret = 0;
3404 int wait_for_proc_work;
3405
3406 if (*consumed == 0) {
3407 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3408 return -EFAULT;
3409 ptr += sizeof(uint32_t);
3410 }
3411
3412 retry:
3413 wait_for_proc_work = thread->transaction_stack == NULL &&
3414 binder_worklist_empty(proc, &thread->todo);
3415
3416 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3417 if (wait_for_proc_work)
3418 proc->ready_threads++;
3419
3420 binder_unlock(__func__);
3421
3422 trace_binder_wait_for_work(wait_for_proc_work,
3423 !!thread->transaction_stack,
3424 !binder_worklist_empty(proc, &thread->todo));
3425 if (wait_for_proc_work) {
3426 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3427 BINDER_LOOPER_STATE_ENTERED))) {
3428 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3429 proc->pid, thread->pid, thread->looper);
3430 wait_event_interruptible(binder_user_error_wait,
3431 binder_stop_on_user_error < 2);
3432 }
3433 binder_set_nice(proc->default_priority);
3434 if (non_block) {
3435 if (!binder_has_proc_work(proc, thread))
3436 ret = -EAGAIN;
3437 } else
3438 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
3439 } else {
3440 if (non_block) {
3441 if (!binder_has_thread_work(thread))
3442 ret = -EAGAIN;
3443 } else
3444 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
3445 }
3446
3447 binder_lock(__func__);
3448
3449 if (wait_for_proc_work)
3450 proc->ready_threads--;
3451 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3452
3453 if (ret)
3454 return ret;
3455
3456 while (1) {
3457 uint32_t cmd;
3458 struct binder_transaction_data tr;
3459 struct binder_work *w = NULL;
3460 struct list_head *list = NULL;
3461 struct binder_transaction *t = NULL;
3462 struct binder_thread *t_from;
3463
3464 binder_inner_proc_lock(proc);
3465 if (!binder_worklist_empty_ilocked(&thread->todo))
3466 list = &thread->todo;
3467 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3468 wait_for_proc_work)
3469 list = &proc->todo;
3470 else {
3471 binder_inner_proc_unlock(proc);
3472
3473 /* no data added */
3474 if (ptr - buffer == 4 && !thread->looper_need_return)
3475 goto retry;
3476 break;
3477 }
3478
3479 if (end - ptr < sizeof(tr) + 4) {
3480 binder_inner_proc_unlock(proc);
3481 break;
3482 }
3483 w = binder_dequeue_work_head_ilocked(list);
3484
3485 switch (w->type) {
3486 case BINDER_WORK_TRANSACTION: {
3487 binder_inner_proc_unlock(proc);
3488 t = container_of(w, struct binder_transaction, work);
3489 } break;
3490 case BINDER_WORK_RETURN_ERROR: {
3491 struct binder_error *e = container_of(
3492 w, struct binder_error, work);
3493
3494 WARN_ON(e->cmd == BR_OK);
3495 binder_inner_proc_unlock(proc);
3496 if (put_user(e->cmd, (uint32_t __user *)ptr))
3497 return -EFAULT;
3498 cmd = e->cmd;
3499 e->cmd = BR_OK;
3500 ptr += sizeof(uint32_t);
3501 binder_stat_br(proc, thread, cmd);
3502 } break;
3503 case BINDER_WORK_TRANSACTION_COMPLETE: {
3504 binder_inner_proc_unlock(proc);
3505 cmd = BR_TRANSACTION_COMPLETE;
3506 if (put_user(cmd, (uint32_t __user *)ptr))
3507 return -EFAULT;
3508 ptr += sizeof(uint32_t);
3509
3510 binder_stat_br(proc, thread, cmd);
3511 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3512 "%d:%d BR_TRANSACTION_COMPLETE\n",
3513 proc->pid, thread->pid);
3514 kfree(w);
3515 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3516 } break;
3517 case BINDER_WORK_NODE: {
3518 struct binder_node *node = container_of(w, struct binder_node, work);
3519 int strong, weak;
3520 binder_uintptr_t node_ptr = node->ptr;
3521 binder_uintptr_t node_cookie = node->cookie;
3522 int node_debug_id = node->debug_id;
3523 int has_weak_ref;
3524 int has_strong_ref;
3525 void __user *orig_ptr = ptr;
3526
3527 BUG_ON(proc != node->proc);
3528 strong = node->internal_strong_refs ||
3529 node->local_strong_refs;
3530 weak = !hlist_empty(&node->refs) ||
3531 node->local_weak_refs ||
3532 node->tmp_refs || strong;
3533 has_strong_ref = node->has_strong_ref;
3534 has_weak_ref = node->has_weak_ref;
3535
3536 if (weak && !has_weak_ref) {
3537 node->has_weak_ref = 1;
3538 node->pending_weak_ref = 1;
3539 node->local_weak_refs++;
3540 }
3541 if (strong && !has_strong_ref) {
3542 node->has_strong_ref = 1;
3543 node->pending_strong_ref = 1;
3544 node->local_strong_refs++;
3545 }
3546 if (!strong && has_strong_ref)
3547 node->has_strong_ref = 0;
3548 if (!weak && has_weak_ref)
3549 node->has_weak_ref = 0;
3550 if (!weak && !strong) {
3551 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3552 "%d:%d node %d u%016llx c%016llx deleted\n",
3553 proc->pid, thread->pid,
3554 node_debug_id,
3555 (u64)node_ptr,
3556 (u64)node_cookie);
3557 rb_erase(&node->rb_node, &proc->nodes);
3558 binder_inner_proc_unlock(proc);
3559 binder_node_lock(node);
3560 /*
3561 * Acquire the node lock before freeing the
3562 * node to serialize with other threads that
3563 * may have been holding the node lock while
3564 * decrementing this node (avoids race where
3565 * this thread frees while the other thread
3566 * is unlocking the node after the final
3567 * decrement)
3568 */
3569 binder_node_unlock(node);
3570 binder_free_node(node);
3571 } else
3572 binder_inner_proc_unlock(proc);
3573
3574 if (weak && !has_weak_ref)
3575 ret = binder_put_node_cmd(
3576 proc, thread, &ptr, node_ptr,
3577 node_cookie, node_debug_id,
3578 BR_INCREFS, "BR_INCREFS");
3579 if (!ret && strong && !has_strong_ref)
3580 ret = binder_put_node_cmd(
3581 proc, thread, &ptr, node_ptr,
3582 node_cookie, node_debug_id,
3583 BR_ACQUIRE, "BR_ACQUIRE");
3584 if (!ret && !strong && has_strong_ref)
3585 ret = binder_put_node_cmd(
3586 proc, thread, &ptr, node_ptr,
3587 node_cookie, node_debug_id,
3588 BR_RELEASE, "BR_RELEASE");
3589 if (!ret && !weak && has_weak_ref)
3590 ret = binder_put_node_cmd(
3591 proc, thread, &ptr, node_ptr,
3592 node_cookie, node_debug_id,
3593 BR_DECREFS, "BR_DECREFS");
3594 if (orig_ptr == ptr)
3595 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3596 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3597 proc->pid, thread->pid,
3598 node_debug_id,
3599 (u64)node_ptr,
3600 (u64)node_cookie);
3601 if (ret)
3602 return ret;
3603 } break;
3604 case BINDER_WORK_DEAD_BINDER:
3605 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3606 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3607 struct binder_ref_death *death;
3608 uint32_t cmd;
3609
3610 death = container_of(w, struct binder_ref_death, work);
3611 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3612 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3613 else
3614 cmd = BR_DEAD_BINDER;
3615 /*
3616 * TODO: there is a race condition between
3617 * death notification requests and delivery
3618 * of the notifications. This will be handled
3619 * in a later patch.
3620 */
3621 binder_inner_proc_unlock(proc);
3622 if (put_user(cmd, (uint32_t __user *)ptr))
3623 return -EFAULT;
3624 ptr += sizeof(uint32_t);
3625 if (put_user(death->cookie,
3626 (binder_uintptr_t __user *)ptr))
3627 return -EFAULT;
3628 ptr += sizeof(binder_uintptr_t);
3629 binder_stat_br(proc, thread, cmd);
3630 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3631 "%d:%d %s %016llx\n",
3632 proc->pid, thread->pid,
3633 cmd == BR_DEAD_BINDER ?
3634 "BR_DEAD_BINDER" :
3635 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3636 (u64)death->cookie);
3637
3638 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3639 kfree(death);
3640 binder_stats_deleted(BINDER_STAT_DEATH);
3641 } else {
3642 binder_inner_proc_lock(proc);
3643 binder_enqueue_work_ilocked(
3644 w, &proc->delivered_death);
3645 binder_inner_proc_unlock(proc);
3646 }
3647 if (cmd == BR_DEAD_BINDER)
3648 goto done; /* DEAD_BINDER notifications can cause transactions */
3649 } break;
3650 }
3651
3652 if (!t)
3653 continue;
3654
3655 BUG_ON(t->buffer == NULL);
3656 if (t->buffer->target_node) {
3657 struct binder_node *target_node = t->buffer->target_node;
3658
3659 tr.target.ptr = target_node->ptr;
3660 tr.cookie = target_node->cookie;
3661 t->saved_priority = task_nice(current);
3662 if (t->priority < target_node->min_priority &&
3663 !(t->flags & TF_ONE_WAY))
3664 binder_set_nice(t->priority);
3665 else if (!(t->flags & TF_ONE_WAY) ||
3666 t->saved_priority > target_node->min_priority)
3667 binder_set_nice(target_node->min_priority);
3668 cmd = BR_TRANSACTION;
3669 } else {
3670 tr.target.ptr = 0;
3671 tr.cookie = 0;
3672 cmd = BR_REPLY;
3673 }
3674 tr.code = t->code;
3675 tr.flags = t->flags;
3676 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
3677
3678 t_from = binder_get_txn_from(t);
3679 if (t_from) {
3680 struct task_struct *sender = t_from->proc->tsk;
3681
3682 tr.sender_pid = task_tgid_nr_ns(sender,
3683 task_active_pid_ns(current));
3684 } else {
3685 tr.sender_pid = 0;
3686 }
3687
3688 tr.data_size = t->buffer->data_size;
3689 tr.offsets_size = t->buffer->offsets_size;
3690 tr.data.ptr.buffer = (binder_uintptr_t)
3691 ((uintptr_t)t->buffer->data +
3692 binder_alloc_get_user_buffer_offset(&proc->alloc));
3693 tr.data.ptr.offsets = tr.data.ptr.buffer +
3694 ALIGN(t->buffer->data_size,
3695 sizeof(void *));
3696
3697 if (put_user(cmd, (uint32_t __user *)ptr)) {
3698 if (t_from)
3699 binder_thread_dec_tmpref(t_from);
3700 return -EFAULT;
3701 }
3702 ptr += sizeof(uint32_t);
3703 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3704 if (t_from)
3705 binder_thread_dec_tmpref(t_from);
3706 return -EFAULT;
3707 }
3708 ptr += sizeof(tr);
3709
3710 trace_binder_transaction_received(t);
3711 binder_stat_br(proc, thread, cmd);
3712 binder_debug(BINDER_DEBUG_TRANSACTION,
3713 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
3714 proc->pid, thread->pid,
3715 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3716 "BR_REPLY",
3717 t->debug_id, t_from ? t_from->proc->pid : 0,
3718 t_from ? t_from->pid : 0, cmd,
3719 t->buffer->data_size, t->buffer->offsets_size,
3720 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
3721
3722 if (t_from)
3723 binder_thread_dec_tmpref(t_from);
3724 t->buffer->allow_user_free = 1;
3725 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3726 t->to_parent = thread->transaction_stack;
3727 t->to_thread = thread;
3728 thread->transaction_stack = t;
3729 } else {
3730 binder_free_transaction(t);
3731 }
3732 break;
3733 }
3734
3735 done:
3736
3737 *consumed = ptr - buffer;
3738 if (proc->requested_threads + proc->ready_threads == 0 &&
3739 proc->requested_threads_started < proc->max_threads &&
3740 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3741 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
3742 * spawn a new thread if we leave this out */) {
3743 proc->requested_threads++;
3744 binder_debug(BINDER_DEBUG_THREADS,
3745 "%d:%d BR_SPAWN_LOOPER\n",
3746 proc->pid, thread->pid);
3747 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3748 return -EFAULT;
3749 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
3750 }
3751 return 0;
3752 }
3753
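/*
 * binder_release_work() - drain a work list that can no longer be serviced.
 * Called when a thread or proc is being torn down: pending two-way
 * transactions that still have a target node get a BR_DEAD_REPLY back to
 * the sender; everything else (errors, TRANSACTION_COMPLETE entries,
 * unclaimed death notifications) is logged as undelivered and, where it
 * was separately allocated, freed.
 */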
3754 static void binder_release_work(struct binder_proc *proc,
3755 struct list_head *list)
3756 {
3757 struct binder_work *w;
3758
3759 while (1) {
3760 w = binder_dequeue_work_head(proc, list);
3761 if (!w)
3762 return;
3763
3764 switch (w->type) {
3765 case BINDER_WORK_TRANSACTION: {
3766 struct binder_transaction *t;
3767
3768 t = container_of(w, struct binder_transaction, work);
3769 if (t->buffer->target_node &&
3770 !(t->flags & TF_ONE_WAY)) {
3771 binder_send_failed_reply(t, BR_DEAD_REPLY);
3772 } else {
3773 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3774 "undelivered transaction %d\n",
3775 t->debug_id);
3776 binder_free_transaction(t);
3777 }
3778 } break;
3779 case BINDER_WORK_RETURN_ERROR: {
3780 struct binder_error *e = container_of(
3781 w, struct binder_error, work);
3782
3783 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3784 "undelivered TRANSACTION_ERROR: %u\n",
3785 e->cmd);
3786 } break;
3787 case BINDER_WORK_TRANSACTION_COMPLETE: {
3788 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3789 "undelivered TRANSACTION_COMPLETE\n");
3790 kfree(w);
3791 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3792 } break;
3793 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3794 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3795 struct binder_ref_death *death;
3796
3797 death = container_of(w, struct binder_ref_death, work);
3798 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3799 "undelivered death notification, %016llx\n",
3800 (u64)death->cookie);
3801 kfree(death);
3802 binder_stats_deleted(BINDER_STAT_DEATH);
3803 } break;
3804 default:
3805 pr_err("unexpected work type, %d, not freed\n",
3806 w->type);
3807 break;
3808 }
3809 }
3810
3811 }
3812
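/*
 * binder_get_thread() - find, or lazily create, the binder_thread for the
 * calling task.  Threads live in proc->threads, an rbtree keyed by PID, so
 * every task that uses the fd ends up with exactly one binder_thread with
 * its own todo list, wait queue and return/reply error slots.
 */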
3813 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3814 {
3815 struct binder_thread *thread = NULL;
3816 struct rb_node *parent = NULL;
3817 struct rb_node **p = &proc->threads.rb_node;
3818
3819 while (*p) {
3820 parent = *p;
3821 thread = rb_entry(parent, struct binder_thread, rb_node);
3822
3823 if (current->pid < thread->pid)
3824 p = &(*p)->rb_left;
3825 else if (current->pid > thread->pid)
3826 p = &(*p)->rb_right;
3827 else
3828 break;
3829 }
3830 if (*p == NULL) {
3831 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3832 if (thread == NULL)
3833 return NULL;
3834 binder_stats_created(BINDER_STAT_THREAD);
3835 thread->proc = proc;
3836 thread->pid = current->pid;
3837 atomic_set(&thread->tmp_ref, 0);
3838 init_waitqueue_head(&thread->wait);
3839 INIT_LIST_HEAD(&thread->todo);
3840 rb_link_node(&thread->rb_node, parent, p);
3841 rb_insert_color(&thread->rb_node, &proc->threads);
3842 thread->looper_need_return = true;
3843 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3844 thread->return_error.cmd = BR_OK;
3845 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3846 thread->reply_error.cmd = BR_OK;
3847 }
3848 return thread;
3849 }
3850
3851 static void binder_free_proc(struct binder_proc *proc)
3852 {
3853 BUG_ON(!list_empty(&proc->todo));
3854 BUG_ON(!list_empty(&proc->delivered_death));
3855 binder_alloc_deferred_release(&proc->alloc);
3856 put_task_struct(proc->tsk);
3857 binder_stats_deleted(BINDER_STAT_PROC);
3858 kfree(proc);
3859 }
3860
3861 static void binder_free_thread(struct binder_thread *thread)
3862 {
3863 BUG_ON(!list_empty(&thread->todo));
3864 binder_stats_deleted(BINDER_STAT_THREAD);
3865 binder_proc_dec_tmpref(thread->proc);
3866 kfree(thread);
3867 }
3868
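/*
 * binder_thread_release() - detach a thread that is exiting (or whose proc
 * is being released).  The thread is removed from proc->threads, its
 * transaction stack is unwound so that peers no longer point at it, a
 * pending reply (if any) is answered with BR_DEAD_REPLY, and any queued
 * work is released.  Returns the number of transactions still active.
 */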
3869 static int binder_thread_release(struct binder_proc *proc,
3870 struct binder_thread *thread)
3871 {
3872 struct binder_transaction *t;
3873 struct binder_transaction *send_reply = NULL;
3874 int active_transactions = 0;
3875 struct binder_transaction *last_t = NULL;
3876
3877 /*
3878 * take a ref on the proc so it survives
3879 * after we remove this thread from proc->threads.
3880 * The corresponding dec is when we actually
3881 * free the thread in binder_free_thread()
3882 */
3883 proc->tmp_ref++;
3884 /*
3885 * take a ref on this thread to ensure it
3886 * survives while we are releasing it
3887 */
3888 atomic_inc(&thread->tmp_ref);
3889 rb_erase(&thread->rb_node, &proc->threads);
3890 t = thread->transaction_stack;
3891 if (t) {
3892 spin_lock(&t->lock);
3893 if (t->to_thread == thread)
3894 send_reply = t;
3895 }
3896 thread->is_dead = true;
3897
3898 while (t) {
3899 last_t = t;
3900 active_transactions++;
3901 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3902 "release %d:%d transaction %d %s, still active\n",
3903 proc->pid, thread->pid,
3904 t->debug_id,
3905 (t->to_thread == thread) ? "in" : "out");
3906
3907 if (t->to_thread == thread) {
3908 t->to_proc = NULL;
3909 t->to_thread = NULL;
3910 if (t->buffer) {
3911 t->buffer->transaction = NULL;
3912 t->buffer = NULL;
3913 }
3914 t = t->to_parent;
3915 } else if (t->from == thread) {
3916 t->from = NULL;
3917 t = t->from_parent;
3918 } else
3919 BUG();
3920 spin_unlock(&last_t->lock);
3921 if (t)
3922 spin_lock(&t->lock);
3923 }
3924
3925 if (send_reply)
3926 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3927 binder_release_work(proc, &thread->todo);
3928 binder_thread_dec_tmpref(thread);
3929 return active_transactions;
3930 }
3931
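/*
 * poll() support: a thread with an empty transaction stack and an empty
 * private todo list waits on the per-proc queue, otherwise on its own
 * queue; POLLIN is reported as soon as matching work is available.
 */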
3932 static unsigned int binder_poll(struct file *filp,
3933 struct poll_table_struct *wait)
3934 {
3935 struct binder_proc *proc = filp->private_data;
3936 struct binder_thread *thread = NULL;
3937 int wait_for_proc_work;
3938
3939 binder_lock(__func__);
3940
3941 thread = binder_get_thread(proc);
3942
3943 wait_for_proc_work = thread->transaction_stack == NULL &&
3944 binder_worklist_empty(proc, &thread->todo);
3945
3946 binder_unlock(__func__);
3947
3948 if (wait_for_proc_work) {
3949 if (binder_has_proc_work(proc, thread))
3950 return POLLIN;
3951 poll_wait(filp, &proc->wait, wait);
3952 if (binder_has_proc_work(proc, thread))
3953 return POLLIN;
3954 } else {
3955 if (binder_has_thread_work(thread))
3956 return POLLIN;
3957 poll_wait(filp, &thread->wait, wait);
3958 if (binder_has_thread_work(thread))
3959 return POLLIN;
3960 }
3961 return 0;
3962 }
3963
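/*
 * BINDER_WRITE_READ is the workhorse ioctl: userspace hands in one buffer
 * of BC_* commands to consume and one buffer to be filled with BR_*
 * commands.  A minimal, purely illustrative userspace sketch:
 *
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)cmds;	// BC_* stream
 *	bwr.write_size   = cmds_len;
 *	bwr.read_buffer  = (binder_uintptr_t)readbuf;
 *	bwr.read_size    = sizeof(readbuf);
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress
 *
 * cmds, readbuf and binder_fd are placeholders, not part of the driver;
 * the struct and the ioctl number come from uapi/linux/android/binder.h.
 */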
3964 static int binder_ioctl_write_read(struct file *filp,
3965 unsigned int cmd, unsigned long arg,
3966 struct binder_thread *thread)
3967 {
3968 int ret = 0;
3969 struct binder_proc *proc = filp->private_data;
3970 unsigned int size = _IOC_SIZE(cmd);
3971 void __user *ubuf = (void __user *)arg;
3972 struct binder_write_read bwr;
3973
3974 if (size != sizeof(struct binder_write_read)) {
3975 ret = -EINVAL;
3976 goto out;
3977 }
3978 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3979 ret = -EFAULT;
3980 goto out;
3981 }
3982 binder_debug(BINDER_DEBUG_READ_WRITE,
3983 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3984 proc->pid, thread->pid,
3985 (u64)bwr.write_size, (u64)bwr.write_buffer,
3986 (u64)bwr.read_size, (u64)bwr.read_buffer);
3987
3988 if (bwr.write_size > 0) {
3989 ret = binder_thread_write(proc, thread,
3990 bwr.write_buffer,
3991 bwr.write_size,
3992 &bwr.write_consumed);
3993 trace_binder_write_done(ret);
3994 if (ret < 0) {
3995 bwr.read_consumed = 0;
3996 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3997 ret = -EFAULT;
3998 goto out;
3999 }
4000 }
4001 if (bwr.read_size > 0) {
4002 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4003 bwr.read_size,
4004 &bwr.read_consumed,
4005 filp->f_flags & O_NONBLOCK);
4006 trace_binder_read_done(ret);
4007 if (!binder_worklist_empty(proc, &proc->todo))
4008 wake_up_interruptible(&proc->wait);
4009 if (ret < 0) {
4010 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4011 ret = -EFAULT;
4012 goto out;
4013 }
4014 }
4015 binder_debug(BINDER_DEBUG_READ_WRITE,
4016 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4017 proc->pid, thread->pid,
4018 (u64)bwr.write_consumed, (u64)bwr.write_size,
4019 (u64)bwr.read_consumed, (u64)bwr.read_size);
4020 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4021 ret = -EFAULT;
4022 goto out;
4023 }
4024 out:
4025 return ret;
4026 }
4027
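/*
 * BINDER_SET_CONTEXT_MGR registers the caller as the context manager
 * (handle 0) for this binder device.  Only one manager is allowed per
 * context, the caller must pass the security hook, and if a manager uid
 * was configured earlier it has to match.  Userspace (typically the
 * servicemanager) just issues:
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 *
 * where binder_fd is illustrative; the command itself is part of the
 * binder uapi.
 */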
4028 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4029 {
4030 int ret = 0;
4031 struct binder_proc *proc = filp->private_data;
4032 struct binder_context *context = proc->context;
4033 struct binder_node *new_node;
4034 kuid_t curr_euid = current_euid();
4035
4036 mutex_lock(&context->context_mgr_node_lock);
4037 if (context->binder_context_mgr_node) {
4038 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4039 ret = -EBUSY;
4040 goto out;
4041 }
4042 ret = security_binder_set_context_mgr(proc->tsk);
4043 if (ret < 0)
4044 goto out;
4045 if (uid_valid(context->binder_context_mgr_uid)) {
4046 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4047 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4048 from_kuid(&init_user_ns, curr_euid),
4049 from_kuid(&init_user_ns,
4050 context->binder_context_mgr_uid));
4051 ret = -EPERM;
4052 goto out;
4053 }
4054 } else {
4055 context->binder_context_mgr_uid = curr_euid;
4056 }
4057 new_node = binder_new_node(proc, NULL);
4058 if (!new_node) {
4059 ret = -ENOMEM;
4060 goto out;
4061 }
4062 binder_node_lock(new_node);
4063 new_node->local_weak_refs++;
4064 new_node->local_strong_refs++;
4065 new_node->has_strong_ref = 1;
4066 new_node->has_weak_ref = 1;
4067 context->binder_context_mgr_node = new_node;
4068 binder_node_unlock(new_node);
4069 binder_put_node(new_node);
4070 out:
4071 mutex_unlock(&context->context_mgr_node_lock);
4072 return ret;
4073 }
4074
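/*
 * Top-level ioctl dispatcher.  Besides BINDER_WRITE_READ, a client usually
 * performs a small handshake right after opening the device; roughly, and
 * only as an illustration (the value 15 is an arbitrary example):
 *
 *	struct binder_version vers;
 *	__u32 max_threads = 15;
 *
 *	ioctl(binder_fd, BINDER_VERSION, &vers);
 *	// compare vers.protocol_version with BINDER_CURRENT_PROTOCOL_VERSION
 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 */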
4075 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4076 {
4077 int ret;
4078 struct binder_proc *proc = filp->private_data;
4079 struct binder_thread *thread;
4080 unsigned int size = _IOC_SIZE(cmd);
4081 void __user *ubuf = (void __user *)arg;
4082
4083 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4084 proc->pid, current->pid, cmd, arg);*/
4085
4086 trace_binder_ioctl(cmd, arg);
4087
4088 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4089 if (ret)
4090 goto err_unlocked;
4091
4092 binder_lock(__func__);
4093 thread = binder_get_thread(proc);
4094 if (thread == NULL) {
4095 ret = -ENOMEM;
4096 goto err;
4097 }
4098
4099 switch (cmd) {
4100 case BINDER_WRITE_READ:
4101 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4102 if (ret)
4103 goto err;
4104 break;
4105 case BINDER_SET_MAX_THREADS:
4106 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
4107 ret = -EINVAL;
4108 goto err;
4109 }
4110 break;
4111 case BINDER_SET_CONTEXT_MGR:
4112 ret = binder_ioctl_set_ctx_mgr(filp);
4113 if (ret)
4114 goto err;
4115 break;
4116 case BINDER_THREAD_EXIT:
4117 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4118 proc->pid, thread->pid);
4119 binder_thread_release(proc, thread);
4120 thread = NULL;
4121 break;
4122 case BINDER_VERSION: {
4123 struct binder_version __user *ver = ubuf;
4124
4125 if (size != sizeof(struct binder_version)) {
4126 ret = -EINVAL;
4127 goto err;
4128 }
4129 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4130 &ver->protocol_version)) {
4131 ret = -EINVAL;
4132 goto err;
4133 }
4134 break;
4135 }
4136 default:
4137 ret = -EINVAL;
4138 goto err;
4139 }
4140 ret = 0;
4141 err:
4142 if (thread)
4143 thread->looper_need_return = false;
4144 binder_unlock(__func__);
4145 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4146 if (ret && ret != -ERESTARTSYS)
4147 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4148 err_unlocked:
4149 trace_binder_ioctl_done(ret);
4150 return ret;
4151 }
4152
4153 static void binder_vma_open(struct vm_area_struct *vma)
4154 {
4155 struct binder_proc *proc = vma->vm_private_data;
4156
4157 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4158 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4159 proc->pid, vma->vm_start, vma->vm_end,
4160 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4161 (unsigned long)pgprot_val(vma->vm_page_prot));
4162 }
4163
4164 static void binder_vma_close(struct vm_area_struct *vma)
4165 {
4166 struct binder_proc *proc = vma->vm_private_data;
4167
4168 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4169 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4170 proc->pid, vma->vm_start, vma->vm_end,
4171 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4172 (unsigned long)pgprot_val(vma->vm_page_prot));
4173 binder_alloc_vma_close(&proc->alloc);
4174 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4175 }
4176
4177 static int binder_vm_fault(struct vm_fault *vmf)
4178 {
4179 return VM_FAULT_SIGBUS;
4180 }
4181
4182 static const struct vm_operations_struct binder_vm_ops = {
4183 .open = binder_vma_open,
4184 .close = binder_vma_close,
4185 .fault = binder_vm_fault,
4186 };
4187
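/*
 * mmap() on the binder fd creates the buffer area (capped at 4MB,
 * read-only for userspace) that transaction payloads are copied into;
 * the actual page management lives in binder_alloc.  From userspace this
 * looks roughly like the following (size and flags are illustrative):
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, binder_fd, 0);
 *
 * Writable mappings are rejected via FORBIDDEN_MMAP_FLAGS and by clearing
 * VM_MAYWRITE below.
 */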
4188 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4189 {
4190 int ret;
4191 struct binder_proc *proc = filp->private_data;
4192 const char *failure_string;
4193
4194 if (proc->tsk != current->group_leader)
4195 return -EINVAL;
4196
4197 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4198 vma->vm_end = vma->vm_start + SZ_4M;
4199
4200 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4201 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4202 __func__, proc->pid, vma->vm_start, vma->vm_end,
4203 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4204 (unsigned long)pgprot_val(vma->vm_page_prot));
4205
4206 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4207 ret = -EPERM;
4208 failure_string = "bad vm_flags";
4209 goto err_bad_arg;
4210 }
4211 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4212 vma->vm_ops = &binder_vm_ops;
4213 vma->vm_private_data = proc;
4214
4215 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4216 if (ret)
4217 return ret;
4218 proc->files = get_files_struct(current);
4219 return 0;
4220
4221 err_bad_arg:
4222 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4223 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4224 return ret;
4225 }
4226
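/*
 * open() creates one binder_proc per process (tied to the group leader),
 * sets up its allocator and work lists, and exposes a matching debugfs
 * entry.  A client would normally start with something like:
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 * "/dev/binder" is the default device node; additional names can be
 * configured, see binder_init() below.
 */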
4227 static int binder_open(struct inode *nodp, struct file *filp)
4228 {
4229 struct binder_proc *proc;
4230 struct binder_device *binder_dev;
4231
4232 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4233 current->group_leader->pid, current->pid);
4234
4235 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4236 if (proc == NULL)
4237 return -ENOMEM;
4238 spin_lock_init(&proc->inner_lock);
4239 spin_lock_init(&proc->outer_lock);
4240 get_task_struct(current->group_leader);
4241 proc->tsk = current->group_leader;
4242 INIT_LIST_HEAD(&proc->todo);
4243 init_waitqueue_head(&proc->wait);
4244 proc->default_priority = task_nice(current);
4245 binder_dev = container_of(filp->private_data, struct binder_device,
4246 miscdev);
4247 proc->context = &binder_dev->context;
4248 binder_alloc_init(&proc->alloc);
4249
4250 binder_lock(__func__);
4251
4252 binder_stats_created(BINDER_STAT_PROC);
4253 proc->pid = current->group_leader->pid;
4254 INIT_LIST_HEAD(&proc->delivered_death);
4255 filp->private_data = proc;
4256
4257 binder_unlock(__func__);
4258
4259 mutex_lock(&binder_procs_lock);
4260 hlist_add_head(&proc->proc_node, &binder_procs);
4261 mutex_unlock(&binder_procs_lock);
4262
4263 if (binder_debugfs_dir_entry_proc) {
4264 char strbuf[11];
4265
4266 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4267 /*
4268 * proc debug entries are shared between contexts, so
4269 * this will fail if the process tries to open the driver
4270 * again with a different context. The printing code will
4271 * print all contexts that a given PID has anyway, so this
4272 * is not a problem.
4273 */
4274 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4275 binder_debugfs_dir_entry_proc,
4276 (void *)(unsigned long)proc->pid,
4277 &binder_proc_fops);
4278 }
4279
4280 return 0;
4281 }
4282
4283 static int binder_flush(struct file *filp, fl_owner_t id)
4284 {
4285 struct binder_proc *proc = filp->private_data;
4286
4287 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4288
4289 return 0;
4290 }
4291
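/*
 * Deferred flush: mark every thread of the proc with looper_need_return
 * and wake any waiters, so that readers blocked in the driver drop back
 * to userspace.
 */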
4292 static void binder_deferred_flush(struct binder_proc *proc)
4293 {
4294 struct rb_node *n;
4295 int wake_count = 0;
4296
4297 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4298 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4299
4300 thread->looper_need_return = true;
4301 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4302 wake_up_interruptible(&thread->wait);
4303 wake_count++;
4304 }
4305 }
4306 wake_up_interruptible_all(&proc->wait);
4307
4308 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4309 "binder_flush: %d woke %d threads\n", proc->pid,
4310 wake_count);
4311 }
4312
4313 static int binder_release(struct inode *nodp, struct file *filp)
4314 {
4315 struct binder_proc *proc = filp->private_data;
4316
4317 debugfs_remove(proc->debugfs_entry);
4318 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4319
4320 return 0;
4321 }
4322
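/*
 * binder_node_release() - called while the owning proc is torn down.  A
 * node that nobody references any more is freed on the spot; otherwise it
 * is moved onto the global binder_dead_nodes list and every ref that
 * requested a death notification gets a BINDER_WORK_DEAD_BINDER queued.
 */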
4323 static int binder_node_release(struct binder_node *node, int refs)
4324 {
4325 struct binder_ref *ref;
4326 int death = 0;
4327 struct binder_proc *proc = node->proc;
4328
4329 binder_release_work(proc, &node->async_todo);
4330
4331 binder_node_lock(node);
4332 binder_inner_proc_lock(proc);
4333 binder_dequeue_work_ilocked(&node->work);
4334 /*
4335 * The caller must have taken a temporary ref on the node.
4336 */
4337 BUG_ON(!node->tmp_refs);
4338 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4339 binder_inner_proc_unlock(proc);
4340 binder_node_unlock(node);
4341 binder_free_node(node);
4342
4343 return refs;
4344 }
4345
4346 node->proc = NULL;
4347 node->local_strong_refs = 0;
4348 node->local_weak_refs = 0;
4349 binder_inner_proc_unlock(proc);
4350
4351 spin_lock(&binder_dead_nodes_lock);
4352 hlist_add_head(&node->dead_node, &binder_dead_nodes);
4353 spin_unlock(&binder_dead_nodes_lock);
4354
4355 hlist_for_each_entry(ref, &node->refs, node_entry) {
4356 refs++;
4357
4358 if (!ref->death)
4359 continue;
4360
4361 death++;
4362
4363 binder_inner_proc_lock(ref->proc);
4364 if (list_empty(&ref->death->work.entry)) {
4365 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4366 binder_enqueue_work_ilocked(&ref->death->work,
4367 &ref->proc->todo);
4368 wake_up_interruptible(&ref->proc->wait);
4369 } else
4370 BUG();
4371 binder_inner_proc_unlock(ref->proc);
4372 }
4373
4374 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4375 "node %d now dead, refs %d, death %d\n",
4376 node->debug_id, refs, death);
4377 binder_node_unlock(node);
4378 binder_put_node(node);
4379
4380 return refs;
4381 }
4382
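/*
 * binder_deferred_release() - full teardown of a binder_proc, run from the
 * deferred workqueue after the last close().  It clears the context
 * manager node if this proc owned it, releases all threads, nodes and
 * refs, and finally drains any work still queued on the proc.
 */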
4383 static void binder_deferred_release(struct binder_proc *proc)
4384 {
4385 struct binder_context *context = proc->context;
4386 struct rb_node *n;
4387 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4388
4389 BUG_ON(proc->files);
4390
4391 mutex_lock(&binder_procs_lock);
4392 hlist_del(&proc->proc_node);
4393 mutex_unlock(&binder_procs_lock);
4394
4395 mutex_lock(&context->context_mgr_node_lock);
4396 if (context->binder_context_mgr_node &&
4397 context->binder_context_mgr_node->proc == proc) {
4398 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4399 "%s: %d context_mgr_node gone\n",
4400 __func__, proc->pid);
4401 context->binder_context_mgr_node = NULL;
4402 }
4403 mutex_unlock(&context->context_mgr_node_lock);
4404 /*
4405 * Make sure proc stays alive after we
4406 * remove all the threads
4407 */
4408 proc->tmp_ref++;
4409
4410 proc->is_dead = true;
4411 threads = 0;
4412 active_transactions = 0;
4413 while ((n = rb_first(&proc->threads))) {
4414 struct binder_thread *thread;
4415
4416 thread = rb_entry(n, struct binder_thread, rb_node);
4417 threads++;
4418 active_transactions += binder_thread_release(proc, thread);
4419 }
4420
4421 nodes = 0;
4422 incoming_refs = 0;
4423 while ((n = rb_first(&proc->nodes))) {
4424 struct binder_node *node;
4425
4426 node = rb_entry(n, struct binder_node, rb_node);
4427 nodes++;
4428 /*
4429 * take a temporary ref on the node before
4430 * calling binder_node_release() which will either
4431 * kfree() the node or call binder_put_node()
4432 */
4433 binder_inc_node_tmpref(node);
4434 rb_erase(&node->rb_node, &proc->nodes);
4435 incoming_refs = binder_node_release(node, incoming_refs);
4436 }
4437
4438 outgoing_refs = 0;
4439 while ((n = rb_first(&proc->refs_by_desc))) {
4440 struct binder_ref *ref;
4441
4442 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4443 outgoing_refs++;
4444 binder_cleanup_ref(ref);
4445 binder_free_ref(ref);
4446 }
4447
4448 binder_release_work(proc, &proc->todo);
4449 binder_release_work(proc, &proc->delivered_death);
4450
4451 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4452 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4453 __func__, proc->pid, threads, nodes, incoming_refs,
4454 outgoing_refs, active_transactions);
4455
4456 binder_proc_dec_tmpref(proc);
4457 }
4458
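/*
 * Worker for the binder_deferred_work item: repeatedly pops one proc off
 * binder_deferred_list, snapshots its pending flags, and performs the
 * requested PUT_FILES / FLUSH / RELEASE steps outside the deferred-list
 * mutex.
 */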
4459 static void binder_deferred_func(struct work_struct *work)
4460 {
4461 struct binder_proc *proc;
4462 struct files_struct *files;
4463
4464 int defer;
4465
4466 do {
4467 binder_lock(__func__);
4468 mutex_lock(&binder_deferred_lock);
4469 if (!hlist_empty(&binder_deferred_list)) {
4470 proc = hlist_entry(binder_deferred_list.first,
4471 struct binder_proc, deferred_work_node);
4472 hlist_del_init(&proc->deferred_work_node);
4473 defer = proc->deferred_work;
4474 proc->deferred_work = 0;
4475 } else {
4476 proc = NULL;
4477 defer = 0;
4478 }
4479 mutex_unlock(&binder_deferred_lock);
4480
4481 files = NULL;
4482 if (defer & BINDER_DEFERRED_PUT_FILES) {
4483 files = proc->files;
4484 if (files)
4485 proc->files = NULL;
4486 }
4487
4488 if (defer & BINDER_DEFERRED_FLUSH)
4489 binder_deferred_flush(proc);
4490
4491 if (defer & BINDER_DEFERRED_RELEASE)
4492 binder_deferred_release(proc); /* frees proc */
4493
4494 binder_unlock(__func__);
4495 if (files)
4496 put_files_struct(files);
4497 } while (proc);
4498 }
4499 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4500
4501 static void
4502 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4503 {
4504 mutex_lock(&binder_deferred_lock);
4505 proc->deferred_work |= defer;
4506 if (hlist_unhashed(&proc->deferred_work_node)) {
4507 hlist_add_head(&proc->deferred_work_node,
4508 &binder_deferred_list);
4509 schedule_work(&binder_deferred_work);
4510 }
4511 mutex_unlock(&binder_deferred_lock);
4512 }
4513
4514 static void print_binder_transaction(struct seq_file *m, const char *prefix,
4515 struct binder_transaction *t)
4516 {
4517 spin_lock(&t->lock);
4518 seq_printf(m,
4519 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4520 prefix, t->debug_id, t,
4521 t->from ? t->from->proc->pid : 0,
4522 t->from ? t->from->pid : 0,
4523 t->to_proc ? t->to_proc->pid : 0,
4524 t->to_thread ? t->to_thread->pid : 0,
4525 t->code, t->flags, t->priority, t->need_reply);
4526 spin_unlock(&t->lock);
4527
4528 if (t->buffer == NULL) {
4529 seq_puts(m, " buffer free\n");
4530 return;
4531 }
4532 if (t->buffer->target_node)
4533 seq_printf(m, " node %d",
4534 t->buffer->target_node->debug_id);
4535 seq_printf(m, " size %zd:%zd data %p\n",
4536 t->buffer->data_size, t->buffer->offsets_size,
4537 t->buffer->data);
4538 }
4539
4540 static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
4541 const char *transaction_prefix,
4542 struct binder_work *w)
4543 {
4544 struct binder_node *node;
4545 struct binder_transaction *t;
4546
4547 switch (w->type) {
4548 case BINDER_WORK_TRANSACTION:
4549 t = container_of(w, struct binder_transaction, work);
4550 print_binder_transaction(m, transaction_prefix, t);
4551 break;
4552 case BINDER_WORK_RETURN_ERROR: {
4553 struct binder_error *e = container_of(
4554 w, struct binder_error, work);
4555
4556 seq_printf(m, "%stransaction error: %u\n",
4557 prefix, e->cmd);
4558 } break;
4559 case BINDER_WORK_TRANSACTION_COMPLETE:
4560 seq_printf(m, "%stransaction complete\n", prefix);
4561 break;
4562 case BINDER_WORK_NODE:
4563 node = container_of(w, struct binder_node, work);
4564 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4565 prefix, node->debug_id,
4566 (u64)node->ptr, (u64)node->cookie);
4567 break;
4568 case BINDER_WORK_DEAD_BINDER:
4569 seq_printf(m, "%shas dead binder\n", prefix);
4570 break;
4571 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4572 seq_printf(m, "%shas cleared dead binder\n", prefix);
4573 break;
4574 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
4575 seq_printf(m, "%shas cleared death notification\n", prefix);
4576 break;
4577 default:
4578 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
4579 break;
4580 }
4581 }
4582
4583 static void print_binder_thread_ilocked(struct seq_file *m,
4584 struct binder_thread *thread,
4585 int print_always)
4586 {
4587 struct binder_transaction *t;
4588 struct binder_work *w;
4589 size_t start_pos = m->count;
4590 size_t header_pos;
4591
4592 WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
4593 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
4594 thread->pid, thread->looper,
4595 thread->looper_need_return,
4596 atomic_read(&thread->tmp_ref));
4597 header_pos = m->count;
4598 t = thread->transaction_stack;
4599 while (t) {
4600 if (t->from == thread) {
4601 print_binder_transaction(m,
4602 " outgoing transaction", t);
4603 t = t->from_parent;
4604 } else if (t->to_thread == thread) {
4605 print_binder_transaction(m,
4606 " incoming transaction", t);
4607 t = t->to_parent;
4608 } else {
4609 print_binder_transaction(m, " bad transaction", t);
4610 t = NULL;
4611 }
4612 }
4613 list_for_each_entry(w, &thread->todo, entry) {
4614 print_binder_work_ilocked(m, " ",
4615 " pending transaction", w);
4616 }
4617 if (!print_always && m->count == header_pos)
4618 m->count = start_pos;
4619 }
4620
4621 static void print_binder_node_nlocked(struct seq_file *m,
4622 struct binder_node *node)
4623 {
4624 struct binder_ref *ref;
4625 struct binder_work *w;
4626 int count;
4627
4628 WARN_ON(!spin_is_locked(&node->lock));
4629
4630 count = 0;
4631 hlist_for_each_entry(ref, &node->refs, node_entry)
4632 count++;
4633
4634 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
4635 node->debug_id, (u64)node->ptr, (u64)node->cookie,
4636 node->has_strong_ref, node->has_weak_ref,
4637 node->local_strong_refs, node->local_weak_refs,
4638 node->internal_strong_refs, count, node->tmp_refs);
4639 if (count) {
4640 seq_puts(m, " proc");
4641 hlist_for_each_entry(ref, &node->refs, node_entry)
4642 seq_printf(m, " %d", ref->proc->pid);
4643 }
4644 seq_puts(m, "\n");
4645 if (node->proc) {
4646 binder_inner_proc_lock(node->proc);
4647 list_for_each_entry(w, &node->async_todo, entry)
4648 print_binder_work_ilocked(m, " ",
4649 " pending async transaction", w);
4650 binder_inner_proc_unlock(node->proc);
4651 }
4652 }
4653
4654 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
4655 {
4656 binder_node_lock(ref->node);
4657 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4658 ref->data.debug_id, ref->data.desc,
4659 ref->node->proc ? "" : "dead ",
4660 ref->node->debug_id, ref->data.strong,
4661 ref->data.weak, ref->death);
4662 binder_node_unlock(ref->node);
4663 }
4664
4665 static void print_binder_proc(struct seq_file *m,
4666 struct binder_proc *proc, int print_all)
4667 {
4668 struct binder_work *w;
4669 struct rb_node *n;
4670 size_t start_pos = m->count;
4671 size_t header_pos;
4672
4673 seq_printf(m, "proc %d\n", proc->pid);
4674 seq_printf(m, "context %s\n", proc->context->name);
4675 header_pos = m->count;
4676
4677 binder_inner_proc_lock(proc);
4678 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4679 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
4680 rb_node), print_all);
4681 binder_inner_proc_unlock(proc);
4682 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4683 struct binder_node *node = rb_entry(n, struct binder_node,
4684 rb_node);
4685 binder_node_lock(node);
4686 if (print_all || node->has_async_transaction)
4687 print_binder_node_nlocked(m, node);
4688 binder_node_unlock(node);
4689 }
4690 if (print_all) {
4691 for (n = rb_first(&proc->refs_by_desc);
4692 n != NULL;
4693 n = rb_next(n))
4694 print_binder_ref(m, rb_entry(n, struct binder_ref,
4695 rb_node_desc));
4696 }
4697 binder_alloc_print_allocated(m, &proc->alloc);
4698 binder_inner_proc_lock(proc);
4699 list_for_each_entry(w, &proc->todo, entry)
4700 print_binder_work_ilocked(m, " ", " pending transaction", w);
4701 list_for_each_entry(w, &proc->delivered_death, entry) {
4702 seq_puts(m, " has delivered dead binder\n");
4703 break;
4704 }
4705 binder_inner_proc_unlock(proc);
4706 if (!print_all && m->count == header_pos)
4707 m->count = start_pos;
4708 }
4709
4710 static const char * const binder_return_strings[] = {
4711 "BR_ERROR",
4712 "BR_OK",
4713 "BR_TRANSACTION",
4714 "BR_REPLY",
4715 "BR_ACQUIRE_RESULT",
4716 "BR_DEAD_REPLY",
4717 "BR_TRANSACTION_COMPLETE",
4718 "BR_INCREFS",
4719 "BR_ACQUIRE",
4720 "BR_RELEASE",
4721 "BR_DECREFS",
4722 "BR_ATTEMPT_ACQUIRE",
4723 "BR_NOOP",
4724 "BR_SPAWN_LOOPER",
4725 "BR_FINISHED",
4726 "BR_DEAD_BINDER",
4727 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4728 "BR_FAILED_REPLY"
4729 };
4730
4731 static const char * const binder_command_strings[] = {
4732 "BC_TRANSACTION",
4733 "BC_REPLY",
4734 "BC_ACQUIRE_RESULT",
4735 "BC_FREE_BUFFER",
4736 "BC_INCREFS",
4737 "BC_ACQUIRE",
4738 "BC_RELEASE",
4739 "BC_DECREFS",
4740 "BC_INCREFS_DONE",
4741 "BC_ACQUIRE_DONE",
4742 "BC_ATTEMPT_ACQUIRE",
4743 "BC_REGISTER_LOOPER",
4744 "BC_ENTER_LOOPER",
4745 "BC_EXIT_LOOPER",
4746 "BC_REQUEST_DEATH_NOTIFICATION",
4747 "BC_CLEAR_DEATH_NOTIFICATION",
4748 "BC_DEAD_BINDER_DONE",
4749 "BC_TRANSACTION_SG",
4750 "BC_REPLY_SG",
4751 };
4752
4753 static const char * const binder_objstat_strings[] = {
4754 "proc",
4755 "thread",
4756 "node",
4757 "ref",
4758 "death",
4759 "transaction",
4760 "transaction_complete"
4761 };
4762
4763 static void print_binder_stats(struct seq_file *m, const char *prefix,
4764 struct binder_stats *stats)
4765 {
4766 int i;
4767
4768 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
4769 ARRAY_SIZE(binder_command_strings));
4770 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
4771 int temp = atomic_read(&stats->bc[i]);
4772
4773 if (temp)
4774 seq_printf(m, "%s%s: %d\n", prefix,
4775 binder_command_strings[i], temp);
4776 }
4777
4778 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
4779 ARRAY_SIZE(binder_return_strings));
4780 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
4781 int temp = atomic_read(&stats->br[i]);
4782
4783 if (temp)
4784 seq_printf(m, "%s%s: %d\n", prefix,
4785 binder_return_strings[i], temp);
4786 }
4787
4788 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4789 ARRAY_SIZE(binder_objstat_strings));
4790 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4791 ARRAY_SIZE(stats->obj_deleted));
4792 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
4793 int created = atomic_read(&stats->obj_created[i]);
4794 int deleted = atomic_read(&stats->obj_deleted[i]);
4795
4796 if (created || deleted)
4797 seq_printf(m, "%s%s: active %d total %d\n",
4798 prefix,
4799 binder_objstat_strings[i],
4800 created - deleted,
4801 created);
4802 }
4803 }
4804
4805 static void print_binder_proc_stats(struct seq_file *m,
4806 struct binder_proc *proc)
4807 {
4808 struct binder_work *w;
4809 struct rb_node *n;
4810 int count, strong, weak;
4811
4812 seq_printf(m, "proc %d\n", proc->pid);
4813 seq_printf(m, "context %s\n", proc->context->name);
4814 count = 0;
4815 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4816 count++;
4817 seq_printf(m, " threads: %d\n", count);
4818 seq_printf(m, " requested threads: %d+%d/%d\n"
4819 " ready threads %d\n"
4820 " free async space %zd\n", proc->requested_threads,
4821 proc->requested_threads_started, proc->max_threads,
4822 proc->ready_threads,
4823 binder_alloc_get_free_async_space(&proc->alloc));
4824 count = 0;
4825 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4826 count++;
4827 seq_printf(m, " nodes: %d\n", count);
4828 count = 0;
4829 strong = 0;
4830 weak = 0;
4831 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4832 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4833 rb_node_desc);
4834 count++;
4835 strong += ref->data.strong;
4836 weak += ref->data.weak;
4837 }
4838 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
4839
4840 count = binder_alloc_get_allocated_count(&proc->alloc);
4841 seq_printf(m, " buffers: %d\n", count);
4842
4843 count = 0;
4844 binder_inner_proc_lock(proc);
4845 list_for_each_entry(w, &proc->todo, entry) {
4846 if (w->type == BINDER_WORK_TRANSACTION)
4847 count++;
4848 }
4849 binder_inner_proc_unlock(proc);
4850 seq_printf(m, " pending transactions: %d\n", count);
4851
4852 print_binder_stats(m, " ", &proc->stats);
4853 }
4854
4855
4856 static int binder_state_show(struct seq_file *m, void *unused)
4857 {
4858 struct binder_proc *proc;
4859 struct binder_node *node;
4860 struct binder_node *last_node = NULL;
4861
4862 binder_lock(__func__);
4863
4864 seq_puts(m, "binder state:\n");
4865
4866 spin_lock(&binder_dead_nodes_lock);
4867 if (!hlist_empty(&binder_dead_nodes))
4868 seq_puts(m, "dead nodes:\n");
4869 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
4870 /*
4871 * take a temporary reference on the node so it
4872 * survives and isn't removed from the list
4873 * while we print it.
4874 */
4875 node->tmp_refs++;
4876 spin_unlock(&binder_dead_nodes_lock);
4877 if (last_node)
4878 binder_put_node(last_node);
4879 binder_node_lock(node);
4880 print_binder_node_nlocked(m, node);
4881 binder_node_unlock(node);
4882 last_node = node;
4883 spin_lock(&binder_dead_nodes_lock);
4884 }
4885 spin_unlock(&binder_dead_nodes_lock);
4886 if (last_node)
4887 binder_put_node(last_node);
4888
4889 mutex_lock(&binder_procs_lock);
4890 hlist_for_each_entry(proc, &binder_procs, proc_node)
4891 print_binder_proc(m, proc, 1);
4892 mutex_unlock(&binder_procs_lock);
4893 binder_unlock(__func__);
4894 return 0;
4895 }
4896
4897 static int binder_stats_show(struct seq_file *m, void *unused)
4898 {
4899 struct binder_proc *proc;
4900
4901 binder_lock(__func__);
4902
4903 seq_puts(m, "binder stats:\n");
4904
4905 print_binder_stats(m, "", &binder_stats);
4906
4907 mutex_lock(&binder_procs_lock);
4908 hlist_for_each_entry(proc, &binder_procs, proc_node)
4909 print_binder_proc_stats(m, proc);
4910 mutex_unlock(&binder_procs_lock);
4911 binder_unlock(__func__);
4912 return 0;
4913 }
4914
4915 static int binder_transactions_show(struct seq_file *m, void *unused)
4916 {
4917 struct binder_proc *proc;
4918
4919 binder_lock(__func__);
4920
4921 seq_puts(m, "binder transactions:\n");
4922 mutex_lock(&binder_procs_lock);
4923 hlist_for_each_entry(proc, &binder_procs, proc_node)
4924 print_binder_proc(m, proc, 0);
4925 mutex_unlock(&binder_procs_lock);
4926 binder_unlock(__func__);
4927 return 0;
4928 }
4929
4930 static int binder_proc_show(struct seq_file *m, void *unused)
4931 {
4932 struct binder_proc *itr;
4933 int pid = (unsigned long)m->private;
4934
4935 binder_lock(__func__);
4936
4937 mutex_lock(&binder_procs_lock);
4938 hlist_for_each_entry(itr, &binder_procs, proc_node) {
4939 if (itr->pid == pid) {
4940 seq_puts(m, "binder proc state:\n");
4941 print_binder_proc(m, itr, 1);
4942 }
4943 }
4944 mutex_unlock(&binder_procs_lock);
4945
4946 binder_unlock(__func__);
4947 return 0;
4948 }
4949
4950 static void print_binder_transaction_log_entry(struct seq_file *m,
4951 struct binder_transaction_log_entry *e)
4952 {
4953 int debug_id = READ_ONCE(e->debug_id_done);
4954 /*
4955 * read barrier to guarantee debug_id_done read before
4956 * we print the log values
4957 */
4958 smp_rmb();
4959 seq_printf(m,
4960 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
4961 e->debug_id, (e->call_type == 2) ? "reply" :
4962 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
4963 e->from_thread, e->to_proc, e->to_thread, e->context_name,
4964 e->to_node, e->target_handle, e->data_size, e->offsets_size,
4965 e->return_error, e->return_error_param,
4966 e->return_error_line);
4967 /*
4968 * read-barrier to guarantee read of debug_id_done after
4969 * done printing the fields of the entry
4970 */
4971 smp_rmb();
4972 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
4973 "\n" : " (incomplete)\n");
4974 }
4975
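/*
 * The transaction log is a fixed-size ring: log->cur only ever increases,
 * so once the buffer has wrapped (log->full) the dump below starts at the
 * oldest slot and prints every entry exactly once, oldest first.
 */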
4976 static int binder_transaction_log_show(struct seq_file *m, void *unused)
4977 {
4978 struct binder_transaction_log *log = m->private;
4979 unsigned int log_cur = atomic_read(&log->cur);
4980 unsigned int count;
4981 unsigned int cur;
4982 int i;
4983
4984 count = log_cur + 1;
4985 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
4986 0 : count % ARRAY_SIZE(log->entry);
4987 if (count > ARRAY_SIZE(log->entry) || log->full)
4988 count = ARRAY_SIZE(log->entry);
4989 for (i = 0; i < count; i++) {
4990 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
4991
4992 print_binder_transaction_log_entry(m, &log->entry[index]);
4993 }
4994 return 0;
4995 }
4996
4997 static const struct file_operations binder_fops = {
4998 .owner = THIS_MODULE,
4999 .poll = binder_poll,
5000 .unlocked_ioctl = binder_ioctl,
5001 .compat_ioctl = binder_ioctl,
5002 .mmap = binder_mmap,
5003 .open = binder_open,
5004 .flush = binder_flush,
5005 .release = binder_release,
5006 };
5007
5008 BINDER_DEBUG_ENTRY(state);
5009 BINDER_DEBUG_ENTRY(stats);
5010 BINDER_DEBUG_ENTRY(transactions);
5011 BINDER_DEBUG_ENTRY(transaction_log);
5012
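/*
 * One misc character device is registered per name in binder_devices_param
 * (a comma-separated list, normally supplied via CONFIG_ANDROID_BINDER_DEVICES
 * or the corresponding module parameter; "binder,hwbinder,vndbinder" is the
 * usual Android configuration, mentioned here only as an example).  Each
 * device gets its own binder_context and therefore its own context manager.
 */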
5013 static int __init init_binder_device(const char *name)
5014 {
5015 int ret;
5016 struct binder_device *binder_device;
5017
5018 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5019 if (!binder_device)
5020 return -ENOMEM;
5021
5022 binder_device->miscdev.fops = &binder_fops;
5023 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5024 binder_device->miscdev.name = name;
5025
5026 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5027 binder_device->context.name = name;
5028 mutex_init(&binder_device->context.context_mgr_node_lock);
5029
5030 ret = misc_register(&binder_device->miscdev);
5031 if (ret < 0) {
5032 kfree(binder_device);
5033 return ret;
5034 }
5035
5036 hlist_add_head(&binder_device->hlist, &binder_devices);
5037
5038 return ret;
5039 }
5040
5041 static int __init binder_init(void)
5042 {
5043 int ret;
5044 char *device_name, *device_names;
5045 struct binder_device *device;
5046 struct hlist_node *tmp;
5047
5048 atomic_set(&binder_transaction_log.cur, ~0U);
5049 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5050
5051 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5052 if (binder_debugfs_dir_entry_root)
5053 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5054 binder_debugfs_dir_entry_root);
5055
5056 if (binder_debugfs_dir_entry_root) {
5057 debugfs_create_file("state",
5058 S_IRUGO,
5059 binder_debugfs_dir_entry_root,
5060 NULL,
5061 &binder_state_fops);
5062 debugfs_create_file("stats",
5063 S_IRUGO,
5064 binder_debugfs_dir_entry_root,
5065 NULL,
5066 &binder_stats_fops);
5067 debugfs_create_file("transactions",
5068 S_IRUGO,
5069 binder_debugfs_dir_entry_root,
5070 NULL,
5071 &binder_transactions_fops);
5072 debugfs_create_file("transaction_log",
5073 S_IRUGO,
5074 binder_debugfs_dir_entry_root,
5075 &binder_transaction_log,
5076 &binder_transaction_log_fops);
5077 debugfs_create_file("failed_transaction_log",
5078 S_IRUGO,
5079 binder_debugfs_dir_entry_root,
5080 &binder_transaction_log_failed,
5081 &binder_transaction_log_fops);
5082 }
5083
5084 /*
5085 * Copy the module parameter string, because we don't want to
5086 * tokenize it in-place.
5087 */
5088 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5089 if (!device_names) {
5090 ret = -ENOMEM;
5091 goto err_alloc_device_names_failed;
5092 }
5093 strcpy(device_names, binder_devices_param);
5094
5095 while ((device_name = strsep(&device_names, ","))) {
5096 ret = init_binder_device(device_name);
5097 if (ret)
5098 goto err_init_binder_device_failed;
5099 }
5100
5101 return ret;
5102
5103 err_init_binder_device_failed:
5104 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5105 misc_deregister(&device->miscdev);
5106 hlist_del(&device->hlist);
5107 kfree(device);
5108 }
5109 err_alloc_device_names_failed:
5110 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5111
5112 return ret;
5113 }
5114
5115 device_initcall(binder_init);
5116
5117 #define CREATE_TRACE_POINTS
5118 #include "binder_trace.h"
5119
5120 MODULE_LICENSE("GPL v2");