/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
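
/*
 * For illustration only: one acquisition sequence permitted by the
 * ordering rules above, for a node whose proc is still alive:
 *
 *	binder_proc_lock(proc);          1) proc->outer_lock
 *	binder_node_lock(node);          2) node->lock
 *	binder_inner_proc_lock(proc);    3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * binder_node_inner_lock() below combines steps 2) and 3).
 */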

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
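
/*
 * Note: BINDER_DEBUG_ENTRY(proc) expands to a binder_proc_open()
 * helper and a binder_proc_fops file_operations table that route
 * reads through seq_file to the binder_proc_show() declared above.
 */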

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
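
/*
 * Usage sketch (assuming binder is built in and sysfs is mounted in the
 * usual place): the 0644 mode above makes the mask writable at runtime,
 * e.g.
 *
 *	echo 0x210 > /sys/module/binder/parameters/debug_mask
 *
 * selects BINDER_DEBUG_DEAD_BINDER (1U << 4) and
 * BINDER_DEBUG_TRANSACTION (1U << 9), replacing the previous mask.
 */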

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
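
/*
 * Note: the transaction log is a fixed 32-entry ring. atomic_inc_return()
 * hands each writer a unique slot and old entries are simply overwritten
 * once the ring wraps (log->full); the debug_id_done handshake above
 * exists so a reader can detect an entry that is being rewritten
 * concurrently.
 */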

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
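
/*
 * Note: each locking helper above is wrapped in a macro that forwards
 * __LINE__, so with BINDER_DEBUG_SPINLOCKS enabled the pr_info() output
 * records the caller's line number rather than a line inside the helper.
 */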

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
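
/*
 * binder_wakeup_proc_ilocked() above is the select-then-wake pairing the
 * binder_wakeup_thread_ilocked() kerneldoc asks callers to follow: pick
 * (and dequeue) a waiting thread, then wake it, falling back to waking
 * poll()ing threads when no thread is waiting.
 */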

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
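
/*
 * Design note: binder_new_node() allocates with GFP_KERNEL (which may
 * sleep) before taking the inner spinlock, and binder_init_node_ilocked()
 * either installs the new node or returns an existing match, in which
 * case the unused allocation is freed. This keeps allocation out of the
 * critical section while tolerating a racing insert.
 */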

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
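
/*
 * Typical tmpref usage (sketch): lookup helpers such as binder_get_node()
 * return with tmp_refs elevated, so a caller does
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 *
 * keeping the node alive between the lookup and the last use.
 */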

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:         ref to be decremented
 * @strong:      if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
1681
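/*
 * Sketch of the expected caller pattern (illustrative only; mirrors
 * binder_update_ref_for_handle() below): the ref is cleaned up under
 * the proc lock but freed only after dropping it:
 *
 *	binder_proc_lock(proc);
 *	delete_ref = binder_dec_ref_olocked(ref, strong);
 *	binder_proc_unlock(proc);
 *	if (delete_ref)
 *		binder_free_ref(ref);
 */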
1682/**
1683 * binder_get_node_from_ref() - get the node from the given proc/desc
1684 * @proc: proc containing the ref
1685 * @desc: the handle associated with the ref
1686 * @need_strong_ref: if true, only return node if ref is strong
1687 * @rdata: the id/refcount data for the ref
1688 *
1689 * Given a proc and ref handle, return the associated binder_node
1690 *
 1691 * Return: a binder_node, or NULL if the ref was not found or if
 1692 * @need_strong_ref is set but the ref holds no strong reference
1692 */
1693static struct binder_node *binder_get_node_from_ref(
1694 struct binder_proc *proc,
1695 u32 desc, bool need_strong_ref,
1696 struct binder_ref_data *rdata)
1697{
1698 struct binder_node *node;
1699 struct binder_ref *ref;
1700
2c1838dc
TK
1701 binder_proc_lock(proc);
1702 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
372e3147
TK
1703 if (!ref)
1704 goto err_no_ref;
1705 node = ref->node;
adc18842
TK
1706 /*
1707 * Take an implicit reference on the node to ensure
1708 * it stays alive until the call to binder_put_node()
1709 */
1710 binder_inc_node_tmpref(node);
372e3147
TK
1711 if (rdata)
1712 *rdata = ref->data;
2c1838dc 1713 binder_proc_unlock(proc);
372e3147
TK
1714
1715 return node;
1716
1717err_no_ref:
2c1838dc 1718 binder_proc_unlock(proc);
372e3147
TK
1719 return NULL;
1720}
1721
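/*
 * Sketch of typical use (illustrative only): the implicit tmpref taken
 * above must be paired with binder_put_node() by the caller:
 *
 *	node = binder_get_node_from_ref(proc, handle, true, &rdata);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */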
1722/**
1723 * binder_free_ref() - free the binder_ref
1724 * @ref: ref to free
1725 *
ed29721e
TK
1726 * Free the binder_ref. Free the binder_node indicated by ref->node
1727 * (if non-NULL) and the binder_ref_death indicated by ref->death.
372e3147
TK
1728 */
1729static void binder_free_ref(struct binder_ref *ref)
1730{
ed29721e
TK
1731 if (ref->node)
1732 binder_free_node(ref->node);
372e3147
TK
1733 kfree(ref->death);
1734 kfree(ref);
1735}
1736
1737/**
1738 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1739 * @proc: proc containing the ref
1740 * @desc: the handle associated with the ref
1741 * @increment: true=inc reference, false=dec reference
1742 * @strong: true=strong reference, false=weak reference
1743 * @rdata: the id/refcount data for the ref
1744 *
1745 * Given a proc and ref handle, increment or decrement the ref
1746 * according to "increment" arg.
1747 *
1748 * Return: 0 if successful, else errno
1749 */
1750static int binder_update_ref_for_handle(struct binder_proc *proc,
1751 uint32_t desc, bool increment, bool strong,
1752 struct binder_ref_data *rdata)
1753{
1754 int ret = 0;
1755 struct binder_ref *ref;
1756 bool delete_ref = false;
1757
2c1838dc
TK
1758 binder_proc_lock(proc);
1759 ref = binder_get_ref_olocked(proc, desc, strong);
372e3147
TK
1760 if (!ref) {
1761 ret = -EINVAL;
1762 goto err_no_ref;
1763 }
1764 if (increment)
2c1838dc 1765 ret = binder_inc_ref_olocked(ref, strong, NULL);
372e3147 1766 else
2c1838dc 1767 delete_ref = binder_dec_ref_olocked(ref, strong);
372e3147
TK
1768
1769 if (rdata)
1770 *rdata = ref->data;
2c1838dc 1771 binder_proc_unlock(proc);
372e3147
TK
1772
1773 if (delete_ref)
1774 binder_free_ref(ref);
1775 return ret;
1776
1777err_no_ref:
2c1838dc 1778 binder_proc_unlock(proc);
372e3147
TK
1779 return ret;
1780}
1781
1782/**
1783 * binder_dec_ref_for_handle() - dec the ref for given handle
1784 * @proc: proc containing the ref
1785 * @desc: the handle associated with the ref
1786 * @strong: true=strong reference, false=weak reference
1787 * @rdata: the id/refcount data for the ref
1788 *
1789 * Just calls binder_update_ref_for_handle() to decrement the ref.
1790 *
1791 * Return: 0 if successful, else errno
1792 */
1793static int binder_dec_ref_for_handle(struct binder_proc *proc,
1794 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1795{
1796 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1797}
1798
1799
1800/**
1801 * binder_inc_ref_for_node() - increment the ref for given proc/node
1802 * @proc: proc containing the ref
1803 * @node: target node
1804 * @strong: true=strong reference, false=weak reference
1805 * @target_list: worklist to use if node is incremented
1806 * @rdata: the id/refcount data for the ref
1807 *
1808 * Given a proc and node, increment the ref. Create the ref if it
1809 * doesn't already exist
1810 *
1811 * Return: 0 if successful, else errno
1812 */
1813static int binder_inc_ref_for_node(struct binder_proc *proc,
1814 struct binder_node *node,
1815 bool strong,
1816 struct list_head *target_list,
1817 struct binder_ref_data *rdata)
1818{
1819 struct binder_ref *ref;
1820 struct binder_ref *new_ref = NULL;
1821 int ret = 0;
1822
2c1838dc
TK
1823 binder_proc_lock(proc);
1824 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
372e3147 1825 if (!ref) {
2c1838dc 1826 binder_proc_unlock(proc);
372e3147
TK
1827 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1828 if (!new_ref)
1829 return -ENOMEM;
2c1838dc
TK
1830 binder_proc_lock(proc);
1831 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
372e3147 1832 }
2c1838dc 1833 ret = binder_inc_ref_olocked(ref, strong, target_list);
372e3147 1834 *rdata = ref->data;
2c1838dc 1835 binder_proc_unlock(proc);
372e3147
TK
1836 if (new_ref && ref != new_ref)
1837 /*
1838 * Another thread created the ref first so
1839 * free the one we allocated
1840 */
1841 kfree(new_ref);
1842 return ret;
355b0502
GKH
1843}
1844
0b89d69a
MC
1845static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1846 struct binder_transaction *t)
355b0502 1847{
b6d282ce 1848 BUG_ON(!target_thread);
858b2719 1849 assert_spin_locked(&target_thread->proc->inner_lock);
b6d282ce
TK
1850 BUG_ON(target_thread->transaction_stack != t);
1851 BUG_ON(target_thread->transaction_stack->from != target_thread);
1852 target_thread->transaction_stack =
1853 target_thread->transaction_stack->from_parent;
1854 t->from = NULL;
1855}
1856
7a4408c6
TK
1857/**
1858 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1859 * @thread: thread to decrement
1860 *
1861 * A thread needs to be kept alive while being used to create or
1862 * handle a transaction. binder_get_txn_from() is used to safely
1863 * extract t->from from a binder_transaction and keep the thread
1864 * indicated by t->from from being freed. When done with that
1865 * binder_thread, this function is called to decrement the
1866 * tmp_ref and free if appropriate (thread has been released
1867 * and no transaction being processed by the driver)
1868 */
1869static void binder_thread_dec_tmpref(struct binder_thread *thread)
1870{
1871 /*
 1872 * atomic ops alone protect the counter while it cannot reach
 1873 * zero or thread->is_dead is false; hence the inner lock below
7a4408c6 1874 */
7bd7b0e6 1875 binder_inner_proc_lock(thread->proc);
7a4408c6
TK
1876 atomic_dec(&thread->tmp_ref);
1877 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
7bd7b0e6 1878 binder_inner_proc_unlock(thread->proc);
7a4408c6
TK
1879 binder_free_thread(thread);
1880 return;
1881 }
7bd7b0e6 1882 binder_inner_proc_unlock(thread->proc);
7a4408c6
TK
1883}
1884
1885/**
1886 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1887 * @proc: proc to decrement
1888 *
1889 * A binder_proc needs to be kept alive while being used to create or
1890 * handle a transaction. proc->tmp_ref is incremented when
 1891 * creating a new transaction or while the binder_proc is in use
1892 * by threads that are being released. When done with the binder_proc,
1893 * this function is called to decrement the counter and free the
1894 * proc if appropriate (proc has been released, all threads have
 1895 * been released and it is not currently in use to process a transaction).
1896 */
1897static void binder_proc_dec_tmpref(struct binder_proc *proc)
1898{
7bd7b0e6 1899 binder_inner_proc_lock(proc);
7a4408c6
TK
1900 proc->tmp_ref--;
1901 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1902 !proc->tmp_ref) {
7bd7b0e6 1903 binder_inner_proc_unlock(proc);
7a4408c6
TK
1904 binder_free_proc(proc);
1905 return;
1906 }
7bd7b0e6 1907 binder_inner_proc_unlock(proc);
7a4408c6
TK
1908}
1909
1910/**
1911 * binder_get_txn_from() - safely extract the "from" thread in transaction
1912 * @t: binder transaction for t->from
1913 *
1914 * Atomically return the "from" thread and increment the tmp_ref
1915 * count for the thread to ensure it stays alive until
1916 * binder_thread_dec_tmpref() is called.
1917 *
1918 * Return: the value of t->from
1919 */
1920static struct binder_thread *binder_get_txn_from(
1921 struct binder_transaction *t)
1922{
1923 struct binder_thread *from;
1924
1925 spin_lock(&t->lock);
1926 from = t->from;
1927 if (from)
1928 atomic_inc(&from->tmp_ref);
1929 spin_unlock(&t->lock);
1930 return from;
1931}
1932
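/*
 * Sketch of the tmp_ref pattern (illustrative only): take a temporary
 * reference on t->from, use the thread, then drop the reference, which
 * may free the thread if it has since been released:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		... safely dereference from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */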
0b89d69a
MC
1933/**
1934 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1935 * @t: binder transaction for t->from
1936 *
1937 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1938 * to guarantee that the thread cannot be released while operating on it.
1939 * The caller must call binder_inner_proc_unlock() to release the inner lock
 1940 * as well as call binder_thread_dec_tmpref() to release the reference.
1941 *
1942 * Return: the value of t->from
1943 */
1944static struct binder_thread *binder_get_txn_from_and_acq_inner(
1945 struct binder_transaction *t)
1946{
1947 struct binder_thread *from;
1948
1949 from = binder_get_txn_from(t);
1950 if (!from)
1951 return NULL;
1952 binder_inner_proc_lock(from->proc);
1953 if (t->from) {
1954 BUG_ON(from != t->from);
1955 return from;
1956 }
1957 binder_inner_proc_unlock(from->proc);
1958 binder_thread_dec_tmpref(from);
1959 return NULL;
1960}
1961
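/*
 * Sketch of the required release sequence (illustrative only; this is
 * the pattern binder_send_failed_reply() below relies on):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... operate with inner lock held ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */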
b6d282ce
TK
1962static void binder_free_transaction(struct binder_transaction *t)
1963{
355b0502
GKH
1964 if (t->buffer)
1965 t->buffer->transaction = NULL;
1966 kfree(t);
1967 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1968}
1969
1970static void binder_send_failed_reply(struct binder_transaction *t,
1971 uint32_t error_code)
1972{
1973 struct binder_thread *target_thread;
d4ec15e1 1974 struct binder_transaction *next;
10f62861 1975
355b0502
GKH
1976 BUG_ON(t->flags & TF_ONE_WAY);
1977 while (1) {
0b89d69a 1978 target_thread = binder_get_txn_from_and_acq_inner(t);
355b0502 1979 if (target_thread) {
26549d17
TK
1980 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1981 "send failed reply for transaction %d to %d:%d\n",
1982 t->debug_id,
1983 target_thread->proc->pid,
1984 target_thread->pid);
1985
0b89d69a 1986 binder_pop_transaction_ilocked(target_thread, t);
26549d17
TK
1987 if (target_thread->reply_error.cmd == BR_OK) {
1988 target_thread->reply_error.cmd = error_code;
148ade2c
MC
1989 binder_enqueue_thread_work_ilocked(
1990 target_thread,
1991 &target_thread->reply_error.work);
355b0502
GKH
1992 wake_up_interruptible(&target_thread->wait);
1993 } else {
e46a3b3b
TK
1994 /*
1995 * Cannot get here for normal operation, but
1996 * we can if multiple synchronous transactions
1997 * are sent without blocking for responses.
1998 * Just ignore the 2nd error in this case.
1999 */
2000 pr_warn("Unexpected reply error: %u\n",
2001 target_thread->reply_error.cmd);
355b0502 2002 }
0b89d69a 2003 binder_inner_proc_unlock(target_thread->proc);
7a4408c6 2004 binder_thread_dec_tmpref(target_thread);
26549d17 2005 binder_free_transaction(t);
355b0502 2006 return;
d4ec15e1
LT
2007 }
2008 next = t->from_parent;
2009
2010 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2011 "send failed reply for transaction %d, target dead\n",
2012 t->debug_id);
2013
b6d282ce 2014 binder_free_transaction(t);
d4ec15e1 2015 if (next == NULL) {
355b0502 2016 binder_debug(BINDER_DEBUG_DEAD_BINDER,
d4ec15e1
LT
2017 "reply failed, no target thread at root\n");
2018 return;
355b0502 2019 }
d4ec15e1
LT
2020 t = next;
2021 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2022 "reply failed, no target thread -- retry %d\n",
2023 t->debug_id);
355b0502
GKH
2024 }
2025}
2026
fb2c4452
MC
2027/**
2028 * binder_cleanup_transaction() - cleans up undelivered transaction
2029 * @t: transaction that needs to be cleaned up
2030 * @reason: reason the transaction wasn't delivered
2031 * @error_code: error to return to caller (if synchronous call)
2032 */
2033static void binder_cleanup_transaction(struct binder_transaction *t,
2034 const char *reason,
2035 uint32_t error_code)
2036{
2037 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2038 binder_send_failed_reply(t, error_code);
2039 } else {
2040 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2041 "undelivered transaction %d, %s\n",
2042 t->debug_id, reason);
2043 binder_free_transaction(t);
2044 }
2045}
2046
feba3900
MC
2047/**
2048 * binder_validate_object() - checks for a valid metadata object in a buffer.
2049 * @buffer: binder_buffer that we're parsing.
2050 * @offset: offset in the buffer at which to validate an object.
2051 *
2052 * Return: If there's a valid metadata object at @offset in @buffer, the
2053 * size of that object. Otherwise, it returns zero.
2054 */
2055static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2056{
2057 /* Check if we can read a header first */
2058 struct binder_object_header *hdr;
2059 size_t object_size = 0;
2060
2061 if (offset > buffer->data_size - sizeof(*hdr) ||
2062 buffer->data_size < sizeof(*hdr) ||
2063 !IS_ALIGNED(offset, sizeof(u32)))
2064 return 0;
2065
2066 /* Ok, now see if we can read a complete object. */
2067 hdr = (struct binder_object_header *)(buffer->data + offset);
2068 switch (hdr->type) {
2069 case BINDER_TYPE_BINDER:
2070 case BINDER_TYPE_WEAK_BINDER:
2071 case BINDER_TYPE_HANDLE:
2072 case BINDER_TYPE_WEAK_HANDLE:
2073 object_size = sizeof(struct flat_binder_object);
2074 break;
2075 case BINDER_TYPE_FD:
2076 object_size = sizeof(struct binder_fd_object);
2077 break;
7980240b
MC
2078 case BINDER_TYPE_PTR:
2079 object_size = sizeof(struct binder_buffer_object);
2080 break;
def95c73
MC
2081 case BINDER_TYPE_FDA:
2082 object_size = sizeof(struct binder_fd_array_object);
2083 break;
feba3900
MC
2084 default:
2085 return 0;
2086 }
2087 if (offset <= buffer->data_size - object_size &&
2088 buffer->data_size >= object_size)
2089 return object_size;
2090 else
2091 return 0;
2092}
2093
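/*
 * Transaction buffer layout assumed by the validation above
 * (illustrative only):
 *
 *	buffer->data
 *	+------------------------+ 0
 *	| data; objects live at  |
 *	| the validated offsets  |
 *	+------------------------+ ALIGN(data_size, sizeof(void *))
 *	| offsets array          |
 *	+------------------------+ ... + offsets_size
 *
 * Each offset must be u32-aligned and point at a binder_object_header
 * within the data area.
 */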
7980240b
MC
2094/**
2095 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2096 * @b: binder_buffer containing the object
2097 * @index: index in offset array at which the binder_buffer_object is
2098 * located
2099 * @start: points to the start of the offset array
2100 * @num_valid: the number of valid offsets in the offset array
2101 *
2102 * Return: If @index is within the valid range of the offset array
2103 * described by @start and @num_valid, and if there's a valid
2104 * binder_buffer_object at the offset found in index @index
2105 * of the offset array, that object is returned. Otherwise,
2106 * %NULL is returned.
2107 * Note that the offset found in index @index itself is not
2108 * verified; this function assumes that @num_valid elements
2109 * from @start were previously verified to have valid offsets.
2110 */
2111static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2112 binder_size_t index,
2113 binder_size_t *start,
2114 binder_size_t num_valid)
2115{
2116 struct binder_buffer_object *buffer_obj;
2117 binder_size_t *offp;
2118
2119 if (index >= num_valid)
2120 return NULL;
2121
2122 offp = start + index;
2123 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2124 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2125 return NULL;
2126
2127 return buffer_obj;
2128}
2129
2130/**
2131 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2132 * @b: transaction buffer
 2133 * @objects_start: start of objects buffer
 2134 * @buffer: binder_buffer_object in which the fixup takes place
 2135 * @fixup_offset: start offset in @buffer to fix up
2136 * @last_obj: last binder_buffer_object that we fixed up in
2137 * @last_min_offset: minimum fixup offset in @last_obj
2138 *
2139 * Return: %true if a fixup in buffer @buffer at offset @offset is
2140 * allowed.
2141 *
2142 * For safety reasons, we only allow fixups inside a buffer to happen
2143 * at increasing offsets; additionally, we only allow fixup on the last
2144 * buffer object that was verified, or one of its parents.
2145 *
2146 * Example of what is allowed:
2147 *
2148 * A
2149 * B (parent = A, offset = 0)
2150 * C (parent = A, offset = 16)
2151 * D (parent = C, offset = 0)
2152 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2153 *
2154 * Examples of what is not allowed:
2155 *
2156 * Decreasing offsets within the same parent:
2157 * A
2158 * C (parent = A, offset = 16)
2159 * B (parent = A, offset = 0) // decreasing offset within A
2160 *
2161 * Referring to a parent that wasn't the last object or any of its parents:
2162 * A
2163 * B (parent = A, offset = 0)
2164 * C (parent = A, offset = 0)
2165 * C (parent = A, offset = 16)
2166 * D (parent = B, offset = 0) // B is not A or any of A's parents
2167 */
2168static bool binder_validate_fixup(struct binder_buffer *b,
2169 binder_size_t *objects_start,
2170 struct binder_buffer_object *buffer,
2171 binder_size_t fixup_offset,
2172 struct binder_buffer_object *last_obj,
2173 binder_size_t last_min_offset)
2174{
2175 if (!last_obj) {
 2176 /* No previous object: nothing to fix up against */
2177 return false;
2178 }
2179
2180 while (last_obj != buffer) {
2181 /*
2182 * Safe to retrieve the parent of last_obj, since it
2183 * was already previously verified by the driver.
2184 */
2185 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2186 return false;
2187 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2188 last_obj = (struct binder_buffer_object *)
2189 (b->data + *(objects_start + last_obj->parent));
2190 }
2191 return (fixup_offset >= last_min_offset);
2192}
2193
355b0502
GKH
2194static void binder_transaction_buffer_release(struct binder_proc *proc,
2195 struct binder_buffer *buffer,
da49889d 2196 binder_size_t *failed_at)
355b0502 2197{
7980240b 2198 binder_size_t *offp, *off_start, *off_end;
355b0502
GKH
2199 int debug_id = buffer->debug_id;
2200
2201 binder_debug(BINDER_DEBUG_TRANSACTION,
56b468fc 2202 "%d buffer release %d, size %zd-%zd, failed at %p\n",
355b0502
GKH
2203 proc->pid, buffer->debug_id,
2204 buffer->data_size, buffer->offsets_size, failed_at);
2205
2206 if (buffer->target_node)
2207 binder_dec_node(buffer->target_node, 1, 0);
2208
7980240b
MC
2209 off_start = (binder_size_t *)(buffer->data +
2210 ALIGN(buffer->data_size, sizeof(void *)));
355b0502
GKH
2211 if (failed_at)
2212 off_end = failed_at;
2213 else
7980240b
MC
2214 off_end = (void *)off_start + buffer->offsets_size;
2215 for (offp = off_start; offp < off_end; offp++) {
feba3900
MC
2216 struct binder_object_header *hdr;
2217 size_t object_size = binder_validate_object(buffer, *offp);
10f62861 2218
feba3900
MC
2219 if (object_size == 0) {
2220 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
da49889d 2221 debug_id, (u64)*offp, buffer->data_size);
355b0502
GKH
2222 continue;
2223 }
feba3900
MC
2224 hdr = (struct binder_object_header *)(buffer->data + *offp);
2225 switch (hdr->type) {
355b0502
GKH
2226 case BINDER_TYPE_BINDER:
2227 case BINDER_TYPE_WEAK_BINDER: {
feba3900
MC
2228 struct flat_binder_object *fp;
2229 struct binder_node *node;
10f62861 2230
feba3900
MC
2231 fp = to_flat_binder_object(hdr);
2232 node = binder_get_node(proc, fp->binder);
355b0502 2233 if (node == NULL) {
da49889d
AH
2234 pr_err("transaction release %d bad node %016llx\n",
2235 debug_id, (u64)fp->binder);
355b0502
GKH
2236 break;
2237 }
2238 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d
AH
2239 " node %d u%016llx\n",
2240 node->debug_id, (u64)node->ptr);
feba3900
MC
2241 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2242 0);
adc18842 2243 binder_put_node(node);
355b0502
GKH
2244 } break;
2245 case BINDER_TYPE_HANDLE:
2246 case BINDER_TYPE_WEAK_HANDLE: {
feba3900 2247 struct flat_binder_object *fp;
372e3147
TK
2248 struct binder_ref_data rdata;
2249 int ret;
0a3ffab9 2250
feba3900 2251 fp = to_flat_binder_object(hdr);
372e3147
TK
2252 ret = binder_dec_ref_for_handle(proc, fp->handle,
2253 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2254
2255 if (ret) {
2256 pr_err("transaction release %d bad handle %d, ret = %d\n",
2257 debug_id, fp->handle, ret);
355b0502
GKH
2258 break;
2259 }
2260 binder_debug(BINDER_DEBUG_TRANSACTION,
372e3147
TK
2261 " ref %d desc %d\n",
2262 rdata.debug_id, rdata.desc);
355b0502
GKH
2263 } break;
2264
feba3900
MC
2265 case BINDER_TYPE_FD: {
2266 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2267
355b0502 2268 binder_debug(BINDER_DEBUG_TRANSACTION,
feba3900 2269 " fd %d\n", fp->fd);
355b0502 2270 if (failed_at)
feba3900
MC
2271 task_close_fd(proc, fp->fd);
2272 } break;
7980240b
MC
2273 case BINDER_TYPE_PTR:
2274 /*
2275 * Nothing to do here, this will get cleaned up when the
2276 * transaction buffer gets freed
2277 */
2278 break;
def95c73
MC
2279 case BINDER_TYPE_FDA: {
2280 struct binder_fd_array_object *fda;
2281 struct binder_buffer_object *parent;
2282 uintptr_t parent_buffer;
2283 u32 *fd_array;
2284 size_t fd_index;
2285 binder_size_t fd_buf_size;
2286
2287 fda = to_binder_fd_array_object(hdr);
2288 parent = binder_validate_ptr(buffer, fda->parent,
2289 off_start,
2290 offp - off_start);
2291 if (!parent) {
f7f84fde 2292 pr_err("transaction release %d bad parent offset\n",
def95c73
MC
2293 debug_id);
2294 continue;
2295 }
2296 /*
2297 * Since the parent was already fixed up, convert it
2298 * back to kernel address space to access it
2299 */
2300 parent_buffer = parent->buffer -
19c98724
TK
2301 binder_alloc_get_user_buffer_offset(
2302 &proc->alloc);
def95c73
MC
2303
2304 fd_buf_size = sizeof(u32) * fda->num_fds;
2305 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2306 pr_err("transaction release %d invalid number of fds (%lld)\n",
2307 debug_id, (u64)fda->num_fds);
2308 continue;
2309 }
2310 if (fd_buf_size > parent->length ||
2311 fda->parent_offset > parent->length - fd_buf_size) {
2312 /* No space for all file descriptors here. */
2313 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2314 debug_id, (u64)fda->num_fds);
2315 continue;
2316 }
1c363eae 2317 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
def95c73
MC
2318 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2319 task_close_fd(proc, fd_array[fd_index]);
2320 } break;
355b0502 2321 default:
64dcfe6b 2322 pr_err("transaction release %d bad object type %x\n",
feba3900 2323 debug_id, hdr->type);
355b0502
GKH
2324 break;
2325 }
2326 }
2327}
2328
a056af42
MC
2329static int binder_translate_binder(struct flat_binder_object *fp,
2330 struct binder_transaction *t,
2331 struct binder_thread *thread)
2332{
2333 struct binder_node *node;
a056af42
MC
2334 struct binder_proc *proc = thread->proc;
2335 struct binder_proc *target_proc = t->to_proc;
372e3147 2336 struct binder_ref_data rdata;
adc18842 2337 int ret = 0;
a056af42
MC
2338
2339 node = binder_get_node(proc, fp->binder);
2340 if (!node) {
673068ee 2341 node = binder_new_node(proc, fp);
a056af42
MC
2342 if (!node)
2343 return -ENOMEM;
a056af42
MC
2344 }
2345 if (fp->cookie != node->cookie) {
2346 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2347 proc->pid, thread->pid, (u64)fp->binder,
2348 node->debug_id, (u64)fp->cookie,
2349 (u64)node->cookie);
adc18842
TK
2350 ret = -EINVAL;
2351 goto done;
2352 }
2353 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2354 ret = -EPERM;
2355 goto done;
a056af42 2356 }
a056af42 2357
372e3147
TK
2358 ret = binder_inc_ref_for_node(target_proc, node,
2359 fp->hdr.type == BINDER_TYPE_BINDER,
2360 &thread->todo, &rdata);
2361 if (ret)
adc18842 2362 goto done;
a056af42
MC
2363
2364 if (fp->hdr.type == BINDER_TYPE_BINDER)
2365 fp->hdr.type = BINDER_TYPE_HANDLE;
2366 else
2367 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2368 fp->binder = 0;
372e3147 2369 fp->handle = rdata.desc;
a056af42 2370 fp->cookie = 0;
a056af42 2371
372e3147 2372 trace_binder_transaction_node_to_ref(t, node, &rdata);
a056af42
MC
2373 binder_debug(BINDER_DEBUG_TRANSACTION,
2374 " node %d u%016llx -> ref %d desc %d\n",
2375 node->debug_id, (u64)node->ptr,
372e3147 2376 rdata.debug_id, rdata.desc);
adc18842
TK
2377done:
2378 binder_put_node(node);
2379 return ret;
a056af42
MC
2380}
2381
2382static int binder_translate_handle(struct flat_binder_object *fp,
2383 struct binder_transaction *t,
2384 struct binder_thread *thread)
2385{
a056af42
MC
2386 struct binder_proc *proc = thread->proc;
2387 struct binder_proc *target_proc = t->to_proc;
372e3147
TK
2388 struct binder_node *node;
2389 struct binder_ref_data src_rdata;
adc18842 2390 int ret = 0;
a056af42 2391
372e3147
TK
2392 node = binder_get_node_from_ref(proc, fp->handle,
2393 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2394 if (!node) {
a056af42
MC
2395 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2396 proc->pid, thread->pid, fp->handle);
2397 return -EINVAL;
2398 }
adc18842
TK
2399 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2400 ret = -EPERM;
2401 goto done;
2402 }
a056af42 2403
673068ee 2404 binder_node_lock(node);
372e3147 2405 if (node->proc == target_proc) {
a056af42
MC
2406 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2407 fp->hdr.type = BINDER_TYPE_BINDER;
2408 else
2409 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
372e3147
TK
2410 fp->binder = node->ptr;
2411 fp->cookie = node->cookie;
673068ee
TK
2412 if (node->proc)
2413 binder_inner_proc_lock(node->proc);
2414 binder_inc_node_nilocked(node,
2415 fp->hdr.type == BINDER_TYPE_BINDER,
2416 0, NULL);
2417 if (node->proc)
2418 binder_inner_proc_unlock(node->proc);
372e3147 2419 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
a056af42
MC
2420 binder_debug(BINDER_DEBUG_TRANSACTION,
2421 " ref %d desc %d -> node %d u%016llx\n",
372e3147
TK
2422 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2423 (u64)node->ptr);
673068ee 2424 binder_node_unlock(node);
a056af42 2425 } else {
372e3147 2426 struct binder_ref_data dest_rdata;
a056af42 2427
673068ee 2428 binder_node_unlock(node);
372e3147
TK
2429 ret = binder_inc_ref_for_node(target_proc, node,
2430 fp->hdr.type == BINDER_TYPE_HANDLE,
2431 NULL, &dest_rdata);
2432 if (ret)
adc18842 2433 goto done;
a056af42
MC
2434
2435 fp->binder = 0;
372e3147 2436 fp->handle = dest_rdata.desc;
a056af42 2437 fp->cookie = 0;
372e3147
TK
2438 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2439 &dest_rdata);
a056af42
MC
2440 binder_debug(BINDER_DEBUG_TRANSACTION,
2441 " ref %d desc %d -> ref %d desc %d (node %d)\n",
372e3147
TK
2442 src_rdata.debug_id, src_rdata.desc,
2443 dest_rdata.debug_id, dest_rdata.desc,
2444 node->debug_id);
a056af42 2445 }
adc18842
TK
2446done:
2447 binder_put_node(node);
2448 return ret;
a056af42
MC
2449}
2450
2451static int binder_translate_fd(int fd,
2452 struct binder_transaction *t,
2453 struct binder_thread *thread,
2454 struct binder_transaction *in_reply_to)
2455{
2456 struct binder_proc *proc = thread->proc;
2457 struct binder_proc *target_proc = t->to_proc;
2458 int target_fd;
2459 struct file *file;
2460 int ret;
2461 bool target_allows_fd;
2462
2463 if (in_reply_to)
2464 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2465 else
2466 target_allows_fd = t->buffer->target_node->accept_fds;
2467 if (!target_allows_fd) {
2468 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2469 proc->pid, thread->pid,
2470 in_reply_to ? "reply" : "transaction",
2471 fd);
2472 ret = -EPERM;
2473 goto err_fd_not_accepted;
2474 }
2475
2476 file = fget(fd);
2477 if (!file) {
2478 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2479 proc->pid, thread->pid, fd);
2480 ret = -EBADF;
2481 goto err_fget;
2482 }
2483 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2484 if (ret < 0) {
2485 ret = -EPERM;
2486 goto err_security;
2487 }
2488
2489 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2490 if (target_fd < 0) {
2491 ret = -ENOMEM;
2492 goto err_get_unused_fd;
2493 }
2494 task_fd_install(target_proc, target_fd, file);
2495 trace_binder_transaction_fd(t, fd, target_fd);
2496 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2497 fd, target_fd);
2498
2499 return target_fd;
2500
2501err_get_unused_fd:
2502err_security:
2503 fput(file);
2504err_fget:
2505err_fd_not_accepted:
2506 return ret;
2507}
2508
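/*
 * Net effect of binder_translate_fd() (illustrative only): the sender's
 * fd and the returned target fd reference the same struct file, much
 * like a dup() across processes:
 *
 *	sender: fd 7 ----> struct file <---- target: fd 4 (O_CLOEXEC)
 */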
def95c73
MC
2509static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2510 struct binder_buffer_object *parent,
2511 struct binder_transaction *t,
2512 struct binder_thread *thread,
2513 struct binder_transaction *in_reply_to)
2514{
2515 binder_size_t fdi, fd_buf_size, num_installed_fds;
2516 int target_fd;
2517 uintptr_t parent_buffer;
2518 u32 *fd_array;
2519 struct binder_proc *proc = thread->proc;
2520 struct binder_proc *target_proc = t->to_proc;
2521
2522 fd_buf_size = sizeof(u32) * fda->num_fds;
2523 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2524 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2525 proc->pid, thread->pid, (u64)fda->num_fds);
2526 return -EINVAL;
2527 }
2528 if (fd_buf_size > parent->length ||
2529 fda->parent_offset > parent->length - fd_buf_size) {
2530 /* No space for all file descriptors here. */
2531 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2532 proc->pid, thread->pid, (u64)fda->num_fds);
2533 return -EINVAL;
2534 }
2535 /*
2536 * Since the parent was already fixed up, convert it
2537 * back to the kernel address space to access it
2538 */
19c98724
TK
2539 parent_buffer = parent->buffer -
2540 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
1c363eae 2541 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
def95c73
MC
2542 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2543 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2544 proc->pid, thread->pid);
2545 return -EINVAL;
2546 }
2547 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2548 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2549 in_reply_to);
2550 if (target_fd < 0)
2551 goto err_translate_fd_failed;
2552 fd_array[fdi] = target_fd;
2553 }
2554 return 0;
2555
2556err_translate_fd_failed:
2557 /*
2558 * Failed to allocate fd or security error, free fds
2559 * installed so far.
2560 */
2561 num_installed_fds = fdi;
2562 for (fdi = 0; fdi < num_installed_fds; fdi++)
2563 task_close_fd(target_proc, fd_array[fdi]);
2564 return target_fd;
2565}
2566
7980240b
MC
2567static int binder_fixup_parent(struct binder_transaction *t,
2568 struct binder_thread *thread,
2569 struct binder_buffer_object *bp,
2570 binder_size_t *off_start,
2571 binder_size_t num_valid,
2572 struct binder_buffer_object *last_fixup_obj,
2573 binder_size_t last_fixup_min_off)
2574{
2575 struct binder_buffer_object *parent;
2576 u8 *parent_buffer;
2577 struct binder_buffer *b = t->buffer;
2578 struct binder_proc *proc = thread->proc;
2579 struct binder_proc *target_proc = t->to_proc;
2580
2581 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2582 return 0;
2583
2584 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2585 if (!parent) {
2586 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2587 proc->pid, thread->pid);
2588 return -EINVAL;
2589 }
2590
2591 if (!binder_validate_fixup(b, off_start,
2592 parent, bp->parent_offset,
2593 last_fixup_obj,
2594 last_fixup_min_off)) {
2595 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2596 proc->pid, thread->pid);
2597 return -EINVAL;
2598 }
2599
2600 if (parent->length < sizeof(binder_uintptr_t) ||
2601 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2602 /* No space for a pointer here! */
2603 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2604 proc->pid, thread->pid);
2605 return -EINVAL;
2606 }
1c363eae 2607 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
19c98724
TK
2608 binder_alloc_get_user_buffer_offset(
2609 &target_proc->alloc));
7980240b
MC
2610 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2611
2612 return 0;
2613}
2614
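/*
 * Sketch of a parent fixup (illustrative only): once the child buffer
 * @bp has been copied and rebased to the target, the pointer slot at
 * @bp->parent_offset inside its parent is patched to the new address:
 *
 *	parent (target space)                child (target space)
 *	+-------------------+                +------------------+
 *	| ... | ptr slot    |---patched----->| bp->buffer ...   |
 *	+-------------------+                +------------------+
 *	      ^ bp->parent_offset
 */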
408c68b1
MC
2615/**
2616 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2617 * @t: transaction to send
2618 * @proc: process to send the transaction to
2619 * @thread: thread in @proc to send the transaction to (may be NULL)
2620 *
2621 * This function queues a transaction to the specified process. It will try
2622 * to find a thread in the target process to handle the transaction and
2623 * wake it up. If no thread is found, the work is queued to the proc
 2624 * todo list.
2625 *
2626 * If the @thread parameter is not NULL, the transaction is always queued
2627 * to the waitlist of that specific thread.
2628 *
 2629 * Return: true if the transaction was successfully queued,
2630 * false if the target process or thread is dead
2631 */
2632static bool binder_proc_transaction(struct binder_transaction *t,
2633 struct binder_proc *proc,
2634 struct binder_thread *thread)
2635{
408c68b1
MC
2636 struct binder_node *node = t->buffer->target_node;
2637 bool oneway = !!(t->flags & TF_ONE_WAY);
148ade2c 2638 bool pending_async = false;
408c68b1
MC
2639
2640 BUG_ON(!node);
2641 binder_node_lock(node);
2642 if (oneway) {
2643 BUG_ON(thread);
2644 if (node->has_async_transaction) {
148ade2c 2645 pending_async = true;
408c68b1 2646 } else {
197410ad 2647 node->has_async_transaction = true;
408c68b1
MC
2648 }
2649 }
2650
2651 binder_inner_proc_lock(proc);
2652
2653 if (proc->is_dead || (thread && thread->is_dead)) {
2654 binder_inner_proc_unlock(proc);
2655 binder_node_unlock(node);
2656 return false;
2657 }
2658
148ade2c 2659 if (!thread && !pending_async)
408c68b1
MC
2660 thread = binder_select_thread_ilocked(proc);
2661
2662 if (thread)
148ade2c
MC
2663 binder_enqueue_thread_work_ilocked(thread, &t->work);
2664 else if (!pending_async)
2665 binder_enqueue_work_ilocked(&t->work, &proc->todo);
408c68b1 2666 else
148ade2c 2667 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
408c68b1 2668
148ade2c 2669 if (!pending_async)
408c68b1
MC
2670 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2671
2672 binder_inner_proc_unlock(proc);
2673 binder_node_unlock(node);
2674
2675 return true;
2676}
2677
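/*
 * Queueing decision made above (illustrative summary):
 *
 *	reply (explicit @thread)  -> thread->todo
 *	sync, no @thread          -> an idle thread's todo, else proc->todo
 *	oneway, node idle         -> as above, and the node is marked
 *	                             has_async_transaction
 *	oneway, node busy         -> node->async_todo (no wakeup)
 */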
512cf465
TK
2678/**
2679 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2680 * @node: struct binder_node for which to get refs
 2681 * @procp: returns @node->proc if valid
 2682 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2683 *
2684 * User-space normally keeps the node alive when creating a transaction
2685 * since it has a reference to the target. The local strong ref keeps it
2686 * alive if the sending process dies before the target process processes
2687 * the transaction. If the source process is malicious or has a reference
2688 * counting bug, relying on the local strong ref can fail.
2689 *
2690 * Since user-space can cause the local strong ref to go away, we also take
2691 * a tmpref on the node to ensure it survives while we are constructing
2692 * the transaction. We also need a tmpref on the proc while we are
2693 * constructing the transaction, so we take that here as well.
2694 *
 2695 * Return: the target_node with refs taken, or NULL if @node->proc is
 2696 * NULL (i.e. the target proc has died). Also sets @procp if valid; if
 2697 * @node->proc is NULL, @error is set to BR_DEAD_REPLY.
2698 */
2699static struct binder_node *binder_get_node_refs_for_txn(
2700 struct binder_node *node,
2701 struct binder_proc **procp,
2702 uint32_t *error)
2703{
2704 struct binder_node *target_node = NULL;
2705
2706 binder_node_inner_lock(node);
2707 if (node->proc) {
2708 target_node = node;
2709 binder_inc_node_nilocked(node, 1, 0, NULL);
2710 binder_inc_node_tmpref_ilocked(node);
2711 node->proc->tmp_ref++;
2712 *procp = node->proc;
2713 } else
2714 *error = BR_DEAD_REPLY;
2715 binder_node_inner_unlock(node);
2716
2717 return target_node;
2718}
2719
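/*
 * Refs taken on success (illustrative summary): one strong ref on the
 * node (travels with t->buffer->target_node), one node tmpref and one
 * proc->tmp_ref; binder_transaction() drops the temporary refs via
 * binder_dec_node_tmpref() and binder_proc_dec_tmpref() when done.
 */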
355b0502
GKH
2720static void binder_transaction(struct binder_proc *proc,
2721 struct binder_thread *thread,
4bfac80a
MC
2722 struct binder_transaction_data *tr, int reply,
2723 binder_size_t extra_buffers_size)
355b0502 2724{
a056af42 2725 int ret;
355b0502
GKH
2726 struct binder_transaction *t;
2727 struct binder_work *tcomplete;
7980240b 2728 binder_size_t *offp, *off_end, *off_start;
212265e5 2729 binder_size_t off_min;
7980240b 2730 u8 *sg_bufp, *sg_buf_end;
7a4408c6 2731 struct binder_proc *target_proc = NULL;
355b0502
GKH
2732 struct binder_thread *target_thread = NULL;
2733 struct binder_node *target_node = NULL;
355b0502
GKH
2734 struct binder_transaction *in_reply_to = NULL;
2735 struct binder_transaction_log_entry *e;
57ada2fb
TK
2736 uint32_t return_error = 0;
2737 uint32_t return_error_param = 0;
2738 uint32_t return_error_line = 0;
7980240b
MC
2739 struct binder_buffer_object *last_fixup_obj = NULL;
2740 binder_size_t last_fixup_min_off = 0;
342e5c90 2741 struct binder_context *context = proc->context;
d99c7333 2742 int t_debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
2743
2744 e = binder_transaction_log_add(&binder_transaction_log);
d99c7333 2745 e->debug_id = t_debug_id;
355b0502
GKH
2746 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2747 e->from_proc = proc->pid;
2748 e->from_thread = thread->pid;
2749 e->target_handle = tr->target.handle;
2750 e->data_size = tr->data_size;
2751 e->offsets_size = tr->offsets_size;
14db3181 2752 e->context_name = proc->context->name;
355b0502
GKH
2753
2754 if (reply) {
0b89d69a 2755 binder_inner_proc_lock(proc);
355b0502
GKH
2756 in_reply_to = thread->transaction_stack;
2757 if (in_reply_to == NULL) {
0b89d69a 2758 binder_inner_proc_unlock(proc);
56b468fc 2759 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
355b0502
GKH
2760 proc->pid, thread->pid);
2761 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2762 return_error_param = -EPROTO;
2763 return_error_line = __LINE__;
355b0502
GKH
2764 goto err_empty_call_stack;
2765 }
355b0502 2766 if (in_reply_to->to_thread != thread) {
7a4408c6 2767 spin_lock(&in_reply_to->lock);
56b468fc 2768 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
2769 proc->pid, thread->pid, in_reply_to->debug_id,
2770 in_reply_to->to_proc ?
2771 in_reply_to->to_proc->pid : 0,
2772 in_reply_to->to_thread ?
2773 in_reply_to->to_thread->pid : 0);
7a4408c6 2774 spin_unlock(&in_reply_to->lock);
0b89d69a 2775 binder_inner_proc_unlock(proc);
355b0502 2776 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2777 return_error_param = -EPROTO;
2778 return_error_line = __LINE__;
355b0502
GKH
2779 in_reply_to = NULL;
2780 goto err_bad_call_stack;
2781 }
2782 thread->transaction_stack = in_reply_to->to_parent;
0b89d69a
MC
2783 binder_inner_proc_unlock(proc);
2784 binder_set_nice(in_reply_to->saved_priority);
2785 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
355b0502
GKH
2786 if (target_thread == NULL) {
2787 return_error = BR_DEAD_REPLY;
57ada2fb 2788 return_error_line = __LINE__;
355b0502
GKH
2789 goto err_dead_binder;
2790 }
2791 if (target_thread->transaction_stack != in_reply_to) {
56b468fc 2792 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
355b0502
GKH
2793 proc->pid, thread->pid,
2794 target_thread->transaction_stack ?
2795 target_thread->transaction_stack->debug_id : 0,
2796 in_reply_to->debug_id);
0b89d69a 2797 binder_inner_proc_unlock(target_thread->proc);
355b0502 2798 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2799 return_error_param = -EPROTO;
2800 return_error_line = __LINE__;
355b0502
GKH
2801 in_reply_to = NULL;
2802 target_thread = NULL;
2803 goto err_dead_binder;
2804 }
2805 target_proc = target_thread->proc;
7a4408c6 2806 target_proc->tmp_ref++;
0b89d69a 2807 binder_inner_proc_unlock(target_thread->proc);
355b0502
GKH
2808 } else {
2809 if (tr->target.handle) {
2810 struct binder_ref *ref;
10f62861 2811
eb34983b
TK
2812 /*
2813 * There must already be a strong ref
2814 * on this node. If so, do a strong
2815 * increment on the node to ensure it
2816 * stays alive until the transaction is
2817 * done.
2818 */
2c1838dc
TK
2819 binder_proc_lock(proc);
2820 ref = binder_get_ref_olocked(proc, tr->target.handle,
2821 true);
eb34983b 2822 if (ref) {
512cf465
TK
2823 target_node = binder_get_node_refs_for_txn(
2824 ref->node, &target_proc,
2825 &return_error);
2826 } else {
56b468fc 2827 binder_user_error("%d:%d got transaction to invalid handle\n",
512cf465 2828 proc->pid, thread->pid);
355b0502 2829 return_error = BR_FAILED_REPLY;
355b0502 2830 }
512cf465 2831 binder_proc_unlock(proc);
355b0502 2832 } else {
c44b1231 2833 mutex_lock(&context->context_mgr_node_lock);
342e5c90 2834 target_node = context->binder_context_mgr_node;
512cf465
TK
2835 if (target_node)
2836 target_node = binder_get_node_refs_for_txn(
2837 target_node, &target_proc,
2838 &return_error);
2839 else
355b0502 2840 return_error = BR_DEAD_REPLY;
c44b1231 2841 mutex_unlock(&context->context_mgr_node_lock);
355b0502 2842 }
512cf465
TK
2843 if (!target_node) {
2844 /*
2845 * return_error is set above
2846 */
2847 return_error_param = -EINVAL;
57ada2fb 2848 return_error_line = __LINE__;
355b0502
GKH
2849 goto err_dead_binder;
2850 }
512cf465 2851 e->to_node = target_node->debug_id;
79af7307
SS
2852 if (security_binder_transaction(proc->tsk,
2853 target_proc->tsk) < 0) {
2854 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2855 return_error_param = -EPERM;
2856 return_error_line = __LINE__;
79af7307
SS
2857 goto err_invalid_target_handle;
2858 }
0b89d69a 2859 binder_inner_proc_lock(proc);
355b0502
GKH
2860 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2861 struct binder_transaction *tmp;
10f62861 2862
355b0502
GKH
2863 tmp = thread->transaction_stack;
2864 if (tmp->to_thread != thread) {
7a4408c6 2865 spin_lock(&tmp->lock);
56b468fc 2866 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
2867 proc->pid, thread->pid, tmp->debug_id,
2868 tmp->to_proc ? tmp->to_proc->pid : 0,
2869 tmp->to_thread ?
2870 tmp->to_thread->pid : 0);
7a4408c6 2871 spin_unlock(&tmp->lock);
0b89d69a 2872 binder_inner_proc_unlock(proc);
355b0502 2873 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2874 return_error_param = -EPROTO;
2875 return_error_line = __LINE__;
355b0502
GKH
2876 goto err_bad_call_stack;
2877 }
2878 while (tmp) {
7a4408c6
TK
2879 struct binder_thread *from;
2880
2881 spin_lock(&tmp->lock);
2882 from = tmp->from;
2883 if (from && from->proc == target_proc) {
2884 atomic_inc(&from->tmp_ref);
2885 target_thread = from;
2886 spin_unlock(&tmp->lock);
2887 break;
2888 }
2889 spin_unlock(&tmp->lock);
355b0502
GKH
2890 tmp = tmp->from_parent;
2891 }
2892 }
0b89d69a 2893 binder_inner_proc_unlock(proc);
355b0502 2894 }
408c68b1 2895 if (target_thread)
355b0502 2896 e->to_thread = target_thread->pid;
355b0502
GKH
2897 e->to_proc = target_proc->pid;
2898
2899 /* TODO: reuse incoming transaction for reply */
2900 t = kzalloc(sizeof(*t), GFP_KERNEL);
2901 if (t == NULL) {
2902 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2903 return_error_param = -ENOMEM;
2904 return_error_line = __LINE__;
355b0502
GKH
2905 goto err_alloc_t_failed;
2906 }
2907 binder_stats_created(BINDER_STAT_TRANSACTION);
7a4408c6 2908 spin_lock_init(&t->lock);
355b0502
GKH
2909
2910 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2911 if (tcomplete == NULL) {
2912 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2913 return_error_param = -ENOMEM;
2914 return_error_line = __LINE__;
355b0502
GKH
2915 goto err_alloc_tcomplete_failed;
2916 }
2917 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2918
d99c7333 2919 t->debug_id = t_debug_id;
355b0502
GKH
2920
2921 if (reply)
2922 binder_debug(BINDER_DEBUG_TRANSACTION,
4bfac80a 2923 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
2924 proc->pid, thread->pid, t->debug_id,
2925 target_proc->pid, target_thread->pid,
da49889d
AH
2926 (u64)tr->data.ptr.buffer,
2927 (u64)tr->data.ptr.offsets,
4bfac80a
MC
2928 (u64)tr->data_size, (u64)tr->offsets_size,
2929 (u64)extra_buffers_size);
355b0502
GKH
2930 else
2931 binder_debug(BINDER_DEBUG_TRANSACTION,
4bfac80a 2932 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
2933 proc->pid, thread->pid, t->debug_id,
2934 target_proc->pid, target_node->debug_id,
da49889d
AH
2935 (u64)tr->data.ptr.buffer,
2936 (u64)tr->data.ptr.offsets,
4bfac80a
MC
2937 (u64)tr->data_size, (u64)tr->offsets_size,
2938 (u64)extra_buffers_size);
355b0502
GKH
2939
2940 if (!reply && !(tr->flags & TF_ONE_WAY))
2941 t->from = thread;
2942 else
2943 t->from = NULL;
57bab7cb 2944 t->sender_euid = task_euid(proc->tsk);
355b0502
GKH
2945 t->to_proc = target_proc;
2946 t->to_thread = target_thread;
2947 t->code = tr->code;
2948 t->flags = tr->flags;
2949 t->priority = task_nice(current);
975a1ac9
AH
2950
2951 trace_binder_transaction(reply, t, target_node);
2952
19c98724 2953 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
4bfac80a
MC
2954 tr->offsets_size, extra_buffers_size,
2955 !reply && (t->flags & TF_ONE_WAY));
57ada2fb
TK
2956 if (IS_ERR(t->buffer)) {
2957 /*
2958 * -ESRCH indicates VMA cleared. The target is dying.
2959 */
2960 return_error_param = PTR_ERR(t->buffer);
2961 return_error = return_error_param == -ESRCH ?
2962 BR_DEAD_REPLY : BR_FAILED_REPLY;
2963 return_error_line = __LINE__;
2964 t->buffer = NULL;
355b0502
GKH
2965 goto err_binder_alloc_buf_failed;
2966 }
2967 t->buffer->allow_user_free = 0;
2968 t->buffer->debug_id = t->debug_id;
2969 t->buffer->transaction = t;
2970 t->buffer->target_node = target_node;
975a1ac9 2971 trace_binder_transaction_alloc_buf(t->buffer);
7980240b
MC
2972 off_start = (binder_size_t *)(t->buffer->data +
2973 ALIGN(tr->data_size, sizeof(void *)));
2974 offp = off_start;
355b0502 2975
da49889d
AH
2976 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2977 tr->data.ptr.buffer, tr->data_size)) {
56b468fc
AS
2978 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2979 proc->pid, thread->pid);
355b0502 2980 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2981 return_error_param = -EFAULT;
2982 return_error_line = __LINE__;
355b0502
GKH
2983 goto err_copy_data_failed;
2984 }
da49889d
AH
2985 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2986 tr->data.ptr.offsets, tr->offsets_size)) {
56b468fc
AS
2987 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2988 proc->pid, thread->pid);
355b0502 2989 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2990 return_error_param = -EFAULT;
2991 return_error_line = __LINE__;
355b0502
GKH
2992 goto err_copy_data_failed;
2993 }
da49889d
AH
2994 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2995 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2996 proc->pid, thread->pid, (u64)tr->offsets_size);
355b0502 2997 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2998 return_error_param = -EINVAL;
2999 return_error_line = __LINE__;
355b0502
GKH
3000 goto err_bad_offset;
3001 }
7980240b
MC
3002 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3003 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3004 proc->pid, thread->pid,
3005 (u64)extra_buffers_size);
3006 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3007 return_error_param = -EINVAL;
3008 return_error_line = __LINE__;
7980240b
MC
3009 goto err_bad_offset;
3010 }
3011 off_end = (void *)off_start + tr->offsets_size;
3012 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3013 sg_buf_end = sg_bufp + extra_buffers_size;
212265e5 3014 off_min = 0;
355b0502 3015 for (; offp < off_end; offp++) {
feba3900
MC
3016 struct binder_object_header *hdr;
3017 size_t object_size = binder_validate_object(t->buffer, *offp);
10f62861 3018
feba3900
MC
3019 if (object_size == 0 || *offp < off_min) {
3020 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
212265e5
AH
3021 proc->pid, thread->pid, (u64)*offp,
3022 (u64)off_min,
feba3900 3023 (u64)t->buffer->data_size);
355b0502 3024 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3025 return_error_param = -EINVAL;
3026 return_error_line = __LINE__;
355b0502
GKH
3027 goto err_bad_offset;
3028 }
feba3900
MC
3029
3030 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3031 off_min = *offp + object_size;
3032 switch (hdr->type) {
355b0502
GKH
3033 case BINDER_TYPE_BINDER:
3034 case BINDER_TYPE_WEAK_BINDER: {
feba3900 3035 struct flat_binder_object *fp;
10f62861 3036
feba3900 3037 fp = to_flat_binder_object(hdr);
a056af42
MC
3038 ret = binder_translate_binder(fp, t, thread);
3039 if (ret < 0) {
355b0502 3040 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3041 return_error_param = ret;
3042 return_error_line = __LINE__;
a056af42 3043 goto err_translate_failed;
355b0502 3044 }
355b0502
GKH
3045 } break;
3046 case BINDER_TYPE_HANDLE:
3047 case BINDER_TYPE_WEAK_HANDLE: {
feba3900 3048 struct flat_binder_object *fp;
0a3ffab9 3049
feba3900 3050 fp = to_flat_binder_object(hdr);
a056af42
MC
3051 ret = binder_translate_handle(fp, t, thread);
3052 if (ret < 0) {
79af7307 3053 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3054 return_error_param = ret;
3055 return_error_line = __LINE__;
a056af42 3056 goto err_translate_failed;
355b0502
GKH
3057 }
3058 } break;
3059
3060 case BINDER_TYPE_FD: {
feba3900 3061 struct binder_fd_object *fp = to_binder_fd_object(hdr);
a056af42
MC
3062 int target_fd = binder_translate_fd(fp->fd, t, thread,
3063 in_reply_to);
355b0502 3064
355b0502 3065 if (target_fd < 0) {
355b0502 3066 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3067 return_error_param = target_fd;
3068 return_error_line = __LINE__;
a056af42 3069 goto err_translate_failed;
355b0502 3070 }
feba3900
MC
3071 fp->pad_binder = 0;
3072 fp->fd = target_fd;
355b0502 3073 } break;
def95c73
MC
3074 case BINDER_TYPE_FDA: {
3075 struct binder_fd_array_object *fda =
3076 to_binder_fd_array_object(hdr);
3077 struct binder_buffer_object *parent =
3078 binder_validate_ptr(t->buffer, fda->parent,
3079 off_start,
3080 offp - off_start);
3081 if (!parent) {
3082 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3083 proc->pid, thread->pid);
3084 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3085 return_error_param = -EINVAL;
3086 return_error_line = __LINE__;
def95c73
MC
3087 goto err_bad_parent;
3088 }
3089 if (!binder_validate_fixup(t->buffer, off_start,
3090 parent, fda->parent_offset,
3091 last_fixup_obj,
3092 last_fixup_min_off)) {
3093 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3094 proc->pid, thread->pid);
3095 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3096 return_error_param = -EINVAL;
3097 return_error_line = __LINE__;
def95c73
MC
3098 goto err_bad_parent;
3099 }
3100 ret = binder_translate_fd_array(fda, parent, t, thread,
3101 in_reply_to);
3102 if (ret < 0) {
3103 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3104 return_error_param = ret;
3105 return_error_line = __LINE__;
def95c73
MC
3106 goto err_translate_failed;
3107 }
3108 last_fixup_obj = parent;
3109 last_fixup_min_off =
3110 fda->parent_offset + sizeof(u32) * fda->num_fds;
3111 } break;
7980240b
MC
3112 case BINDER_TYPE_PTR: {
3113 struct binder_buffer_object *bp =
3114 to_binder_buffer_object(hdr);
3115 size_t buf_left = sg_buf_end - sg_bufp;
3116
3117 if (bp->length > buf_left) {
3118 binder_user_error("%d:%d got transaction with too large buffer\n",
3119 proc->pid, thread->pid);
3120 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3121 return_error_param = -EINVAL;
3122 return_error_line = __LINE__;
7980240b
MC
3123 goto err_bad_offset;
3124 }
3125 if (copy_from_user(sg_bufp,
3126 (const void __user *)(uintptr_t)
3127 bp->buffer, bp->length)) {
3128 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3129 proc->pid, thread->pid);
57ada2fb 3130 return_error_param = -EFAULT;
7980240b 3131 return_error = BR_FAILED_REPLY;
57ada2fb 3132 return_error_line = __LINE__;
7980240b
MC
3133 goto err_copy_data_failed;
3134 }
3135 /* Fixup buffer pointer to target proc address space */
3136 bp->buffer = (uintptr_t)sg_bufp +
19c98724
TK
3137 binder_alloc_get_user_buffer_offset(
3138 &target_proc->alloc);
7980240b
MC
3139 sg_bufp += ALIGN(bp->length, sizeof(u64));
3140
3141 ret = binder_fixup_parent(t, thread, bp, off_start,
3142 offp - off_start,
3143 last_fixup_obj,
3144 last_fixup_min_off);
3145 if (ret < 0) {
3146 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3147 return_error_param = ret;
3148 return_error_line = __LINE__;
7980240b
MC
3149 goto err_translate_failed;
3150 }
3151 last_fixup_obj = bp;
3152 last_fixup_min_off = 0;
3153 } break;
355b0502 3154 default:
64dcfe6b 3155 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
feba3900 3156 proc->pid, thread->pid, hdr->type);
355b0502 3157 return_error = BR_FAILED_REPLY;
57ada2fb
TK
3158 return_error_param = -EINVAL;
3159 return_error_line = __LINE__;
355b0502
GKH
3160 goto err_bad_object_type;
3161 }
3162 }
ccae6f67 3163 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
673068ee 3164 t->work.type = BINDER_WORK_TRANSACTION;
ccae6f67 3165
355b0502 3166 if (reply) {
148ade2c 3167 binder_enqueue_thread_work(thread, tcomplete);
0b89d69a
MC
3168 binder_inner_proc_lock(target_proc);
3169 if (target_thread->is_dead) {
3170 binder_inner_proc_unlock(target_proc);
7a4408c6 3171 goto err_dead_proc_or_thread;
0b89d69a 3172 }
355b0502 3173 BUG_ON(t->buffer->async_transaction != 0);
0b89d69a 3174 binder_pop_transaction_ilocked(target_thread, in_reply_to);
148ade2c 3175 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
0b89d69a 3176 binder_inner_proc_unlock(target_proc);
408c68b1 3177 wake_up_interruptible_sync(&target_thread->wait);
b6d282ce 3178 binder_free_transaction(in_reply_to);
355b0502
GKH
3179 } else if (!(t->flags & TF_ONE_WAY)) {
3180 BUG_ON(t->buffer->async_transaction != 0);
0b89d69a 3181 binder_inner_proc_lock(proc);
148ade2c
MC
3182 /*
3183 * Defer the TRANSACTION_COMPLETE, so we don't return to
3184 * userspace immediately; this allows the target process to
3185 * immediately start processing this transaction, reducing
3186 * latency. We will then return the TRANSACTION_COMPLETE when
3187 * the target replies (or there is an error).
3188 */
3189 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
355b0502
GKH
3190 t->need_reply = 1;
3191 t->from_parent = thread->transaction_stack;
3192 thread->transaction_stack = t;
0b89d69a 3193 binder_inner_proc_unlock(proc);
408c68b1 3194 if (!binder_proc_transaction(t, target_proc, target_thread)) {
0b89d69a
MC
3195 binder_inner_proc_lock(proc);
3196 binder_pop_transaction_ilocked(thread, t);
3197 binder_inner_proc_unlock(proc);
7a4408c6
TK
3198 goto err_dead_proc_or_thread;
3199 }
355b0502
GKH
3200 } else {
3201 BUG_ON(target_node == NULL);
3202 BUG_ON(t->buffer->async_transaction != 1);
148ade2c 3203 binder_enqueue_thread_work(thread, tcomplete);
408c68b1 3204 if (!binder_proc_transaction(t, target_proc, NULL))
7a4408c6 3205 goto err_dead_proc_or_thread;
00b40d61 3206 }
7a4408c6
TK
3207 if (target_thread)
3208 binder_thread_dec_tmpref(target_thread);
3209 binder_proc_dec_tmpref(target_proc);
512cf465
TK
3210 if (target_node)
3211 binder_dec_node_tmpref(target_node);
d99c7333
TK
3212 /*
3213 * write barrier to synchronize with initialization
3214 * of log entry
3215 */
3216 smp_wmb();
3217 WRITE_ONCE(e->debug_id_done, t_debug_id);
355b0502
GKH
3218 return;
3219
7a4408c6
TK
3220err_dead_proc_or_thread:
3221 return_error = BR_DEAD_REPLY;
3222 return_error_line = __LINE__;
d53bebdf 3223 binder_dequeue_work(proc, tcomplete);
a056af42 3224err_translate_failed:
355b0502
GKH
3225err_bad_object_type:
3226err_bad_offset:
def95c73 3227err_bad_parent:
355b0502 3228err_copy_data_failed:
975a1ac9 3229 trace_binder_transaction_failed_buffer_release(t->buffer);
355b0502 3230 binder_transaction_buffer_release(target_proc, t->buffer, offp);
512cf465
TK
3231 if (target_node)
3232 binder_dec_node_tmpref(target_node);
eb34983b 3233 target_node = NULL;
355b0502 3234 t->buffer->transaction = NULL;
19c98724 3235 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
355b0502
GKH
3236err_binder_alloc_buf_failed:
3237 kfree(tcomplete);
3238 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3239err_alloc_tcomplete_failed:
3240 kfree(t);
3241 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3242err_alloc_t_failed:
3243err_bad_call_stack:
3244err_empty_call_stack:
3245err_dead_binder:
3246err_invalid_target_handle:
7a4408c6
TK
3247 if (target_thread)
3248 binder_thread_dec_tmpref(target_thread);
3249 if (target_proc)
3250 binder_proc_dec_tmpref(target_proc);
512cf465 3251 if (target_node) {
eb34983b 3252 binder_dec_node(target_node, 1, 0);
512cf465
TK
3253 binder_dec_node_tmpref(target_node);
3254 }
eb34983b 3255
355b0502 3256 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
57ada2fb
TK
3257 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3258 proc->pid, thread->pid, return_error, return_error_param,
3259 (u64)tr->data_size, (u64)tr->offsets_size,
3260 return_error_line);
355b0502
GKH
3261
3262 {
3263 struct binder_transaction_log_entry *fe;
10f62861 3264
57ada2fb
TK
3265 e->return_error = return_error;
3266 e->return_error_param = return_error_param;
3267 e->return_error_line = return_error_line;
355b0502
GKH
3268 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3269 *fe = *e;
d99c7333
TK
3270 /*
3271 * write barrier to synchronize with initialization
3272 * of log entry
3273 */
3274 smp_wmb();
3275 WRITE_ONCE(e->debug_id_done, t_debug_id);
3276 WRITE_ONCE(fe->debug_id_done, t_debug_id);
355b0502
GKH
3277 }
3278
26549d17 3279 BUG_ON(thread->return_error.cmd != BR_OK);
355b0502 3280 if (in_reply_to) {
26549d17 3281 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
148ade2c 3282 binder_enqueue_thread_work(thread, &thread->return_error.work);
355b0502 3283 binder_send_failed_reply(in_reply_to, return_error);
26549d17
TK
3284 } else {
3285 thread->return_error.cmd = return_error;
148ade2c 3286 binder_enqueue_thread_work(thread, &thread->return_error.work);
26549d17 3287 }
355b0502
GKH
3288}
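
/*
 * Editor's sketch (not part of binder.c): a minimal user-space view of
 * the stream that binder_transaction() above is driven by for a
 * synchronous call. The handle and method code are hypothetical; the
 * BC_/BR_ framing is the UAPI one parsed by binder_thread_write()
 * below.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_sync_txn(int binder_fd, uint32_t handle,
			 void *data, size_t data_size)
{
	struct binder_transaction_data txn;
	uint8_t wbuf[sizeof(uint32_t) + sizeof(txn)];
	uint32_t rbuf[32];
	uint32_t cmd = BC_TRANSACTION;
	struct binder_write_read bwr;

	memset(&txn, 0, sizeof(txn));
	txn.target.handle = handle;	/* hypothetical remote ref */
	txn.code = 1;			/* hypothetical method code */
	txn.data_size = data_size;
	txn.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;

	/* write buffer: one command word followed by its payload */
	memcpy(wbuf, &cmd, sizeof(cmd));
	memcpy(wbuf + sizeof(cmd), &txn, sizeof(txn));

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	bwr.write_size = sizeof(wbuf);
	bwr.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf;
	bwr.read_size = sizeof(rbuf);

	/*
	 * Because TRANSACTION_COMPLETE is deferred (see the comment in
	 * binder_transaction() above), the read side typically sees
	 * BR_NOOP first, then BR_TRANSACTION_COMPLETE and BR_REPLY
	 * together once the target has answered.
	 */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}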
3289
fb07ebc3
BP
3290static int binder_thread_write(struct binder_proc *proc,
3291 struct binder_thread *thread,
da49889d
AH
3292 binder_uintptr_t binder_buffer, size_t size,
3293 binder_size_t *consumed)
355b0502
GKH
3294{
3295 uint32_t cmd;
342e5c90 3296 struct binder_context *context = proc->context;
da49889d 3297 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3298 void __user *ptr = buffer + *consumed;
3299 void __user *end = buffer + size;
3300
26549d17 3301 while (ptr < end && thread->return_error.cmd == BR_OK) {
372e3147
TK
3302 int ret;
3303
355b0502
GKH
3304 if (get_user(cmd, (uint32_t __user *)ptr))
3305 return -EFAULT;
3306 ptr += sizeof(uint32_t);
975a1ac9 3307 trace_binder_command(cmd);
355b0502 3308 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
0953c797
BJS
3309 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3310 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3311 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
355b0502
GKH
3312 }
3313 switch (cmd) {
3314 case BC_INCREFS:
3315 case BC_ACQUIRE:
3316 case BC_RELEASE:
3317 case BC_DECREFS: {
3318 uint32_t target;
355b0502 3319 const char *debug_string;
372e3147
TK
3320 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3321 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3322 struct binder_ref_data rdata;
355b0502
GKH
3323
3324 if (get_user(target, (uint32_t __user *)ptr))
3325 return -EFAULT;
c44b1231 3326
355b0502 3327 ptr += sizeof(uint32_t);
372e3147
TK
3328 ret = -1;
3329 if (increment && !target) {
c44b1231 3330 struct binder_node *ctx_mgr_node;
c44b1231
TK
3331 mutex_lock(&context->context_mgr_node_lock);
3332 ctx_mgr_node = context->binder_context_mgr_node;
372e3147
TK
3333 if (ctx_mgr_node)
3334 ret = binder_inc_ref_for_node(
3335 proc, ctx_mgr_node,
3336 strong, NULL, &rdata);
c44b1231
TK
3337 mutex_unlock(&context->context_mgr_node_lock);
3338 }
372e3147
TK
3339 if (ret)
3340 ret = binder_update_ref_for_handle(
3341 proc, target, increment, strong,
3342 &rdata);
3343 if (!ret && rdata.desc != target) {
3344 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3345 proc->pid, thread->pid,
3346 target, rdata.desc);
355b0502
GKH
3347 }
3348 switch (cmd) {
3349 case BC_INCREFS:
3350 debug_string = "IncRefs";
355b0502
GKH
3351 break;
3352 case BC_ACQUIRE:
3353 debug_string = "Acquire";
355b0502
GKH
3354 break;
3355 case BC_RELEASE:
3356 debug_string = "Release";
355b0502
GKH
3357 break;
3358 case BC_DECREFS:
3359 default:
3360 debug_string = "DecRefs";
372e3147
TK
3361 break;
3362 }
3363 if (ret) {
3364 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3365 proc->pid, thread->pid, debug_string,
3366 strong, target, ret);
355b0502
GKH
3367 break;
3368 }
3369 binder_debug(BINDER_DEBUG_USER_REFS,
372e3147
TK
3370 "%d:%d %s ref %d desc %d s %d w %d\n",
3371 proc->pid, thread->pid, debug_string,
3372 rdata.debug_id, rdata.desc, rdata.strong,
3373 rdata.weak);
355b0502
GKH
3374 break;
3375 }
3376 case BC_INCREFS_DONE:
3377 case BC_ACQUIRE_DONE: {
da49889d
AH
3378 binder_uintptr_t node_ptr;
3379 binder_uintptr_t cookie;
355b0502 3380 struct binder_node *node;
673068ee 3381 bool free_node;
355b0502 3382
da49889d 3383 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 3384 return -EFAULT;
da49889d
AH
3385 ptr += sizeof(binder_uintptr_t);
3386 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 3387 return -EFAULT;
da49889d 3388 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
3389 node = binder_get_node(proc, node_ptr);
3390 if (node == NULL) {
da49889d 3391 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
3392 proc->pid, thread->pid,
3393 cmd == BC_INCREFS_DONE ?
3394 "BC_INCREFS_DONE" :
3395 "BC_ACQUIRE_DONE",
da49889d 3396 (u64)node_ptr);
355b0502
GKH
3397 break;
3398 }
3399 if (cookie != node->cookie) {
da49889d 3400 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
3401 proc->pid, thread->pid,
3402 cmd == BC_INCREFS_DONE ?
3403 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
3404 (u64)node_ptr, node->debug_id,
3405 (u64)cookie, (u64)node->cookie);
adc18842 3406 binder_put_node(node);
355b0502
GKH
3407 break;
3408 }
673068ee 3409 binder_node_inner_lock(node);
355b0502
GKH
3410 if (cmd == BC_ACQUIRE_DONE) {
3411 if (node->pending_strong_ref == 0) {
56b468fc 3412 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
3413 proc->pid, thread->pid,
3414 node->debug_id);
673068ee 3415 binder_node_inner_unlock(node);
adc18842 3416 binder_put_node(node);
355b0502
GKH
3417 break;
3418 }
3419 node->pending_strong_ref = 0;
3420 } else {
3421 if (node->pending_weak_ref == 0) {
56b468fc 3422 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
3423 proc->pid, thread->pid,
3424 node->debug_id);
673068ee 3425 binder_node_inner_unlock(node);
adc18842 3426 binder_put_node(node);
355b0502
GKH
3427 break;
3428 }
3429 node->pending_weak_ref = 0;
3430 }
673068ee
TK
3431 free_node = binder_dec_node_nilocked(node,
3432 cmd == BC_ACQUIRE_DONE, 0);
3433 WARN_ON(free_node);
355b0502 3434 binder_debug(BINDER_DEBUG_USER_REFS,
adc18842 3435 "%d:%d %s node %d ls %d lw %d tr %d\n",
355b0502
GKH
3436 proc->pid, thread->pid,
3437 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
adc18842
TK
3438 node->debug_id, node->local_strong_refs,
3439 node->local_weak_refs, node->tmp_refs);
673068ee 3440 binder_node_inner_unlock(node);
adc18842 3441 binder_put_node(node);
355b0502
GKH
3442 break;
3443 }
3444 case BC_ATTEMPT_ACQUIRE:
56b468fc 3445 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
3446 return -EINVAL;
3447 case BC_ACQUIRE_RESULT:
56b468fc 3448 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
3449 return -EINVAL;
3450
3451 case BC_FREE_BUFFER: {
da49889d 3452 binder_uintptr_t data_ptr;
355b0502
GKH
3453 struct binder_buffer *buffer;
3454
da49889d 3455 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 3456 return -EFAULT;
da49889d 3457 ptr += sizeof(binder_uintptr_t);
355b0502 3458
53d311cf
TK
3459 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3460 data_ptr);
355b0502 3461 if (buffer == NULL) {
da49889d
AH
3462 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3463 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
3464 break;
3465 }
3466 if (!buffer->allow_user_free) {
da49889d
AH
3467 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3468 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
3469 break;
3470 }
3471 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
3472 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3473 proc->pid, thread->pid, (u64)data_ptr,
3474 buffer->debug_id,
355b0502
GKH
3475 buffer->transaction ? "active" : "finished");
3476
3477 if (buffer->transaction) {
3478 buffer->transaction->buffer = NULL;
3479 buffer->transaction = NULL;
3480 }
3481 if (buffer->async_transaction && buffer->target_node) {
72196393
TK
3482 struct binder_node *buf_node;
3483 struct binder_work *w;
3484
3485 buf_node = buffer->target_node;
673068ee 3486 binder_node_inner_lock(buf_node);
72196393
TK
3487 BUG_ON(!buf_node->has_async_transaction);
3488 BUG_ON(buf_node->proc != proc);
72196393
TK
3489 w = binder_dequeue_work_head_ilocked(
3490 &buf_node->async_todo);
3a6430ce 3491 if (!w) {
197410ad 3492 buf_node->has_async_transaction = false;
3a6430ce 3493 } else {
72196393 3494 binder_enqueue_work_ilocked(
3a6430ce
MC
3495 w, &proc->todo);
3496 binder_wakeup_proc_ilocked(proc);
3497 }
673068ee 3498 binder_node_inner_unlock(buf_node);
355b0502 3499 }
975a1ac9 3500 trace_binder_transaction_buffer_release(buffer);
355b0502 3501 binder_transaction_buffer_release(proc, buffer, NULL);
19c98724 3502 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
3503 break;
3504 }
3505
7980240b
MC
3506 case BC_TRANSACTION_SG:
3507 case BC_REPLY_SG: {
3508 struct binder_transaction_data_sg tr;
3509
3510 if (copy_from_user(&tr, ptr, sizeof(tr)))
3511 return -EFAULT;
3512 ptr += sizeof(tr);
3513 binder_transaction(proc, thread, &tr.transaction_data,
3514 cmd == BC_REPLY_SG, tr.buffers_size);
3515 break;
3516 }
355b0502
GKH
3517 case BC_TRANSACTION:
3518 case BC_REPLY: {
3519 struct binder_transaction_data tr;
3520
3521 if (copy_from_user(&tr, ptr, sizeof(tr)))
3522 return -EFAULT;
3523 ptr += sizeof(tr);
4bfac80a
MC
3524 binder_transaction(proc, thread, &tr,
3525 cmd == BC_REPLY, 0);
355b0502
GKH
3526 break;
3527 }
3528
3529 case BC_REGISTER_LOOPER:
3530 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3531 "%d:%d BC_REGISTER_LOOPER\n",
355b0502 3532 proc->pid, thread->pid);
b3e68612 3533 binder_inner_proc_lock(proc);
355b0502
GKH
3534 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3535 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3536 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
3537 proc->pid, thread->pid);
3538 } else if (proc->requested_threads == 0) {
3539 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3540 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
3541 proc->pid, thread->pid);
3542 } else {
3543 proc->requested_threads--;
3544 proc->requested_threads_started++;
3545 }
3546 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
b3e68612 3547 binder_inner_proc_unlock(proc);
355b0502
GKH
3548 break;
3549 case BC_ENTER_LOOPER:
3550 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3551 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
3552 proc->pid, thread->pid);
3553 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3554 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3555 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
3556 proc->pid, thread->pid);
3557 }
3558 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3559 break;
3560 case BC_EXIT_LOOPER:
3561 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3562 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
3563 proc->pid, thread->pid);
3564 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3565 break;
3566
3567 case BC_REQUEST_DEATH_NOTIFICATION:
3568 case BC_CLEAR_DEATH_NOTIFICATION: {
3569 uint32_t target;
da49889d 3570 binder_uintptr_t cookie;
355b0502 3571 struct binder_ref *ref;
2c1838dc 3572 struct binder_ref_death *death = NULL;
355b0502
GKH
3573
3574 if (get_user(target, (uint32_t __user *)ptr))
3575 return -EFAULT;
3576 ptr += sizeof(uint32_t);
da49889d 3577 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 3578 return -EFAULT;
da49889d 3579 ptr += sizeof(binder_uintptr_t);
2c1838dc
TK
3580 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3581 /*
3582 * Allocate memory for death notification
3583 * before taking lock
3584 */
3585 death = kzalloc(sizeof(*death), GFP_KERNEL);
3586 if (death == NULL) {
3587 WARN_ON(thread->return_error.cmd !=
3588 BR_OK);
3589 thread->return_error.cmd = BR_ERROR;
148ade2c
MC
3590 binder_enqueue_thread_work(
3591 thread,
3592 &thread->return_error.work);
2c1838dc
TK
3593 binder_debug(
3594 BINDER_DEBUG_FAILED_TRANSACTION,
3595 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3596 proc->pid, thread->pid);
3597 break;
3598 }
3599 }
3600 binder_proc_lock(proc);
3601 ref = binder_get_ref_olocked(proc, target, false);
355b0502 3602 if (ref == NULL) {
56b468fc 3603 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
3604 proc->pid, thread->pid,
3605 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3606 "BC_REQUEST_DEATH_NOTIFICATION" :
3607 "BC_CLEAR_DEATH_NOTIFICATION",
3608 target);
2c1838dc
TK
3609 binder_proc_unlock(proc);
3610 kfree(death);
355b0502
GKH
3611 break;
3612 }
3613
3614 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 3615 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
3616 proc->pid, thread->pid,
3617 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3618 "BC_REQUEST_DEATH_NOTIFICATION" :
3619 "BC_CLEAR_DEATH_NOTIFICATION",
372e3147
TK
3620 (u64)cookie, ref->data.debug_id,
3621 ref->data.desc, ref->data.strong,
3622 ref->data.weak, ref->node->debug_id);
355b0502 3623
ab51ec6b 3624 binder_node_lock(ref->node);
355b0502
GKH
3625 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3626 if (ref->death) {
56b468fc 3627 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502 3628 proc->pid, thread->pid);
ab51ec6b 3629 binder_node_unlock(ref->node);
2c1838dc
TK
3630 binder_proc_unlock(proc);
3631 kfree(death);
355b0502
GKH
3632 break;
3633 }
3634 binder_stats_created(BINDER_STAT_DEATH);
3635 INIT_LIST_HEAD(&death->work.entry);
3636 death->cookie = cookie;
3637 ref->death = death;
3638 if (ref->node->proc == NULL) {
3639 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
bb74562a
MC
3640
3641 binder_inner_proc_lock(proc);
3642 binder_enqueue_work_ilocked(
3643 &ref->death->work, &proc->todo);
3644 binder_wakeup_proc_ilocked(proc);
3645 binder_inner_proc_unlock(proc);
355b0502
GKH
3646 }
3647 } else {
3648 if (ref->death == NULL) {
56b468fc 3649 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502 3650 proc->pid, thread->pid);
673068ee 3651 binder_node_unlock(ref->node);
2c1838dc 3652 binder_proc_unlock(proc);
355b0502
GKH
3653 break;
3654 }
3655 death = ref->death;
3656 if (death->cookie != cookie) {
da49889d 3657 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 3658 proc->pid, thread->pid,
da49889d
AH
3659 (u64)death->cookie,
3660 (u64)cookie);
673068ee 3661 binder_node_unlock(ref->node);
2c1838dc 3662 binder_proc_unlock(proc);
355b0502
GKH
3663 break;
3664 }
3665 ref->death = NULL;
72196393 3666 binder_inner_proc_lock(proc);
355b0502
GKH
3667 if (list_empty(&death->work.entry)) {
3668 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
72196393
TK
3669 if (thread->looper &
3670 (BINDER_LOOPER_STATE_REGISTERED |
3671 BINDER_LOOPER_STATE_ENTERED))
148ade2c
MC
3672 binder_enqueue_thread_work_ilocked(
3673 thread,
3674 &death->work);
72196393
TK
3675 else {
3676 binder_enqueue_work_ilocked(
3677 &death->work,
3678 &proc->todo);
1b77e9dc 3679 binder_wakeup_proc_ilocked(
408c68b1 3680 proc);
355b0502
GKH
3681 }
3682 } else {
3683 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3684 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3685 }
72196393 3686 binder_inner_proc_unlock(proc);
355b0502 3687 }
ab51ec6b 3688 binder_node_unlock(ref->node);
2c1838dc 3689 binder_proc_unlock(proc);
355b0502
GKH
3690 } break;
3691 case BC_DEAD_BINDER_DONE: {
3692 struct binder_work *w;
da49889d 3693 binder_uintptr_t cookie;
355b0502 3694 struct binder_ref_death *death = NULL;
10f62861 3695
da49889d 3696 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
3697 return -EFAULT;
3698
7a64cd88 3699 ptr += sizeof(cookie);
72196393
TK
3700 binder_inner_proc_lock(proc);
3701 list_for_each_entry(w, &proc->delivered_death,
3702 entry) {
3703 struct binder_ref_death *tmp_death =
3704 container_of(w,
3705 struct binder_ref_death,
3706 work);
10f62861 3707
355b0502
GKH
3708 if (tmp_death->cookie == cookie) {
3709 death = tmp_death;
3710 break;
3711 }
3712 }
3713 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
3714 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3715 proc->pid, thread->pid, (u64)cookie,
3716 death);
355b0502 3717 if (death == NULL) {
da49889d
AH
3718 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3719 proc->pid, thread->pid, (u64)cookie);
72196393 3720 binder_inner_proc_unlock(proc);
355b0502
GKH
3721 break;
3722 }
72196393 3723 binder_dequeue_work_ilocked(&death->work);
355b0502
GKH
3724 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3725 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
72196393
TK
3726 if (thread->looper &
3727 (BINDER_LOOPER_STATE_REGISTERED |
3728 BINDER_LOOPER_STATE_ENTERED))
148ade2c
MC
3729 binder_enqueue_thread_work_ilocked(
3730 thread, &death->work);
72196393
TK
3731 else {
3732 binder_enqueue_work_ilocked(
3733 &death->work,
3734 &proc->todo);
408c68b1 3735 binder_wakeup_proc_ilocked(proc);
355b0502
GKH
3736 }
3737 }
72196393 3738 binder_inner_proc_unlock(proc);
355b0502
GKH
3739 } break;
3740
3741 default:
56b468fc 3742 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
3743 proc->pid, thread->pid, cmd);
3744 return -EINVAL;
3745 }
3746 *consumed = ptr - buffer;
3747 }
3748 return 0;
3749}
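
/*
 * Editor's sketch (not part of binder.c): the write buffer consumed by
 * binder_thread_write() above is a flat stream of 32-bit BC_ command
 * words, each followed by that command's fixed-size payload. Here a
 * looper registration and a buffer free are batched into one ioctl;
 * the buffer address is hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int enter_looper_and_free(int fd, binder_uintptr_t buf_to_free)
{
	uint8_t wbuf[2 * sizeof(uint32_t) + sizeof(binder_uintptr_t)];
	struct binder_write_read bwr;
	uint32_t cmd;
	size_t off = 0;

	cmd = BC_ENTER_LOOPER;		/* no payload */
	memcpy(wbuf + off, &cmd, sizeof(cmd));
	off += sizeof(cmd);

	cmd = BC_FREE_BUFFER;		/* payload: one binder_uintptr_t */
	memcpy(wbuf + off, &cmd, sizeof(cmd));
	off += sizeof(cmd);
	memcpy(wbuf + off, &buf_to_free, sizeof(buf_to_free));
	off += sizeof(buf_to_free);

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	bwr.write_size = off;
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}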
3750
fb07ebc3
BP
3751static void binder_stat_br(struct binder_proc *proc,
3752 struct binder_thread *thread, uint32_t cmd)
355b0502 3753{
975a1ac9 3754 trace_binder_return(cmd);
355b0502 3755 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
0953c797
BJS
3756 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3757 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3758 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
3759 }
3760}
3761
26b47d8a
TK
3762static int binder_put_node_cmd(struct binder_proc *proc,
3763 struct binder_thread *thread,
3764 void __user **ptrp,
3765 binder_uintptr_t node_ptr,
3766 binder_uintptr_t node_cookie,
3767 int node_debug_id,
3768 uint32_t cmd, const char *cmd_name)
3769{
3770 void __user *ptr = *ptrp;
3771
3772 if (put_user(cmd, (uint32_t __user *)ptr))
3773 return -EFAULT;
3774 ptr += sizeof(uint32_t);
3775
3776 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3777 return -EFAULT;
3778 ptr += sizeof(binder_uintptr_t);
3779
3780 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3781 return -EFAULT;
3782 ptr += sizeof(binder_uintptr_t);
3783
3784 binder_stat_br(proc, thread, cmd);
3785 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3786 proc->pid, thread->pid, cmd_name, node_debug_id,
3787 (u64)node_ptr, (u64)node_cookie);
3788
3789 *ptrp = ptr;
3790 return 0;
3791}
3792
1b77e9dc
MC
3793static int binder_wait_for_work(struct binder_thread *thread,
3794 bool do_proc_work)
3795{
3796 DEFINE_WAIT(wait);
3797 struct binder_proc *proc = thread->proc;
3798 int ret = 0;
3799
3800 freezer_do_not_count();
3801 binder_inner_proc_lock(proc);
3802 for (;;) {
3803 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3804 if (binder_has_work_ilocked(thread, do_proc_work))
3805 break;
3806 if (do_proc_work)
3807 list_add(&thread->waiting_thread_node,
3808 &proc->waiting_threads);
3809 binder_inner_proc_unlock(proc);
3810 schedule();
3811 binder_inner_proc_lock(proc);
3812 list_del_init(&thread->waiting_thread_node);
3813 if (signal_pending(current)) {
3814 ret = -ERESTARTSYS;
3815 break;
3816 }
3817 }
3818 finish_wait(&thread->wait, &wait);
3819 binder_inner_proc_unlock(proc);
3820 freezer_count();
3821
3822 return ret;
3823}
3824
355b0502
GKH
3825static int binder_thread_read(struct binder_proc *proc,
3826 struct binder_thread *thread,
da49889d
AH
3827 binder_uintptr_t binder_buffer, size_t size,
3828 binder_size_t *consumed, int non_block)
355b0502 3829{
da49889d 3830 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3831 void __user *ptr = buffer + *consumed;
3832 void __user *end = buffer + size;
3833
3834 int ret = 0;
3835 int wait_for_proc_work;
3836
3837 if (*consumed == 0) {
3838 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3839 return -EFAULT;
3840 ptr += sizeof(uint32_t);
3841 }
3842
3843retry:
0b89d69a 3844 binder_inner_proc_lock(proc);
1b77e9dc 3845 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
0b89d69a 3846 binder_inner_proc_unlock(proc);
355b0502 3847
355b0502 3848 thread->looper |= BINDER_LOOPER_STATE_WAITING;
975a1ac9 3849
975a1ac9
AH
3850 trace_binder_wait_for_work(wait_for_proc_work,
3851 !!thread->transaction_stack,
72196393 3852 !binder_worklist_empty(proc, &thread->todo));
355b0502
GKH
3853 if (wait_for_proc_work) {
3854 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3855 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 3856 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
3857 proc->pid, thread->pid, thread->looper);
3858 wait_event_interruptible(binder_user_error_wait,
3859 binder_stop_on_user_error < 2);
3860 }
3861 binder_set_nice(proc->default_priority);
1b77e9dc
MC
3862 }
3863
3864 if (non_block) {
3865 if (!binder_has_work(thread, wait_for_proc_work))
3866 ret = -EAGAIN;
355b0502 3867 } else {
1b77e9dc 3868 ret = binder_wait_for_work(thread, wait_for_proc_work);
355b0502 3869 }
975a1ac9 3870
355b0502
GKH
3871 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3872
3873 if (ret)
3874 return ret;
3875
3876 while (1) {
3877 uint32_t cmd;
3878 struct binder_transaction_data tr;
72196393
TK
3879 struct binder_work *w = NULL;
3880 struct list_head *list = NULL;
355b0502 3881 struct binder_transaction *t = NULL;
7a4408c6 3882 struct binder_thread *t_from;
355b0502 3883
ed29721e 3884 binder_inner_proc_lock(proc);
72196393
TK
3885 if (!binder_worklist_empty_ilocked(&thread->todo))
3886 list = &thread->todo;
3887 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3888 wait_for_proc_work)
3889 list = &proc->todo;
3890 else {
3891 binder_inner_proc_unlock(proc);
3892
395262a9 3893 /* no data added beyond the 4-byte BR_NOOP written above */
08dabcee 3894 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
3895 goto retry;
3896 break;
3897 }
3898
ed29721e
TK
3899 if (end - ptr < sizeof(tr) + 4) {
3900 binder_inner_proc_unlock(proc);
355b0502 3901 break;
ed29721e 3902 }
72196393 3903 w = binder_dequeue_work_head_ilocked(list);
148ade2c
MC
3904 if (binder_worklist_empty_ilocked(&thread->todo))
3905 thread->process_todo = false;
355b0502
GKH
3906
3907 switch (w->type) {
3908 case BINDER_WORK_TRANSACTION: {
ed29721e 3909 binder_inner_proc_unlock(proc);
355b0502
GKH
3910 t = container_of(w, struct binder_transaction, work);
3911 } break;
26549d17
TK
3912 case BINDER_WORK_RETURN_ERROR: {
3913 struct binder_error *e = container_of(
3914 w, struct binder_error, work);
3915
3916 WARN_ON(e->cmd == BR_OK);
ed29721e 3917 binder_inner_proc_unlock(proc);
26549d17
TK
3918 if (put_user(e->cmd, (uint32_t __user *)ptr))
3919 return -EFAULT;
3920 e->cmd = BR_OK;
3921 ptr += sizeof(uint32_t);
3922
4f9adc8f 3923 binder_stat_br(proc, thread, e->cmd);
26549d17 3924 } break;
355b0502 3925 case BINDER_WORK_TRANSACTION_COMPLETE: {
ed29721e 3926 binder_inner_proc_unlock(proc);
355b0502
GKH
3927 cmd = BR_TRANSACTION_COMPLETE;
3928 if (put_user(cmd, (uint32_t __user *)ptr))
3929 return -EFAULT;
3930 ptr += sizeof(uint32_t);
3931
3932 binder_stat_br(proc, thread, cmd);
3933 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 3934 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502 3935 proc->pid, thread->pid);
355b0502
GKH
3936 kfree(w);
3937 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3938 } break;
3939 case BINDER_WORK_NODE: {
3940 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
3941 int strong, weak;
3942 binder_uintptr_t node_ptr = node->ptr;
3943 binder_uintptr_t node_cookie = node->cookie;
3944 int node_debug_id = node->debug_id;
3945 int has_weak_ref;
3946 int has_strong_ref;
3947 void __user *orig_ptr = ptr;
3948
3949 BUG_ON(proc != node->proc);
3950 strong = node->internal_strong_refs ||
3951 node->local_strong_refs;
3952 weak = !hlist_empty(&node->refs) ||
adc18842
TK
3953 node->local_weak_refs ||
3954 node->tmp_refs || strong;
26b47d8a
TK
3955 has_strong_ref = node->has_strong_ref;
3956 has_weak_ref = node->has_weak_ref;
3957
3958 if (weak && !has_weak_ref) {
355b0502
GKH
3959 node->has_weak_ref = 1;
3960 node->pending_weak_ref = 1;
3961 node->local_weak_refs++;
26b47d8a
TK
3962 }
3963 if (strong && !has_strong_ref) {
355b0502
GKH
3964 node->has_strong_ref = 1;
3965 node->pending_strong_ref = 1;
3966 node->local_strong_refs++;
26b47d8a
TK
3967 }
3968 if (!strong && has_strong_ref)
355b0502 3969 node->has_strong_ref = 0;
26b47d8a 3970 if (!weak && has_weak_ref)
355b0502 3971 node->has_weak_ref = 0;
26b47d8a
TK
3972 if (!weak && !strong) {
3973 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3974 "%d:%d node %d u%016llx c%016llx deleted\n",
3975 proc->pid, thread->pid,
3976 node_debug_id,
3977 (u64)node_ptr,
3978 (u64)node_cookie);
3979 rb_erase(&node->rb_node, &proc->nodes);
ed29721e 3980 binder_inner_proc_unlock(proc);
673068ee
TK
3981 binder_node_lock(node);
3982 /*
3983 * Acquire the node lock before freeing the
3984 * node to serialize with other threads that
3985 * may have been holding the node lock while
3986 * decrementing this node (avoids race where
3987 * this thread frees while the other thread
3988 * is unlocking the node after the final
3989 * decrement)
3990 */
3991 binder_node_unlock(node);
ed29721e
TK
3992 binder_free_node(node);
3993 } else
3994 binder_inner_proc_unlock(proc);
3995
26b47d8a
TK
3996 if (weak && !has_weak_ref)
3997 ret = binder_put_node_cmd(
3998 proc, thread, &ptr, node_ptr,
3999 node_cookie, node_debug_id,
4000 BR_INCREFS, "BR_INCREFS");
4001 if (!ret && strong && !has_strong_ref)
4002 ret = binder_put_node_cmd(
4003 proc, thread, &ptr, node_ptr,
4004 node_cookie, node_debug_id,
4005 BR_ACQUIRE, "BR_ACQUIRE");
4006 if (!ret && !strong && has_strong_ref)
4007 ret = binder_put_node_cmd(
4008 proc, thread, &ptr, node_ptr,
4009 node_cookie, node_debug_id,
4010 BR_RELEASE, "BR_RELEASE");
4011 if (!ret && !weak && has_weak_ref)
4012 ret = binder_put_node_cmd(
4013 proc, thread, &ptr, node_ptr,
4014 node_cookie, node_debug_id,
4015 BR_DECREFS, "BR_DECREFS");
4016 if (orig_ptr == ptr)
4017 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4018 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4019 proc->pid, thread->pid,
4020 node_debug_id,
4021 (u64)node_ptr,
4022 (u64)node_cookie);
4023 if (ret)
4024 return ret;
355b0502
GKH
4025 } break;
4026 case BINDER_WORK_DEAD_BINDER:
4027 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4028 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4029 struct binder_ref_death *death;
4030 uint32_t cmd;
ab51ec6b 4031 binder_uintptr_t cookie;
355b0502
GKH
4032
4033 death = container_of(w, struct binder_ref_death, work);
4034 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4035 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4036 else
4037 cmd = BR_DEAD_BINDER;
ab51ec6b
MC
4038 cookie = death->cookie;
4039
355b0502 4040 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 4041 "%d:%d %s %016llx\n",
355b0502
GKH
4042 proc->pid, thread->pid,
4043 cmd == BR_DEAD_BINDER ?
4044 "BR_DEAD_BINDER" :
4045 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
ab51ec6b 4046 (u64)cookie);
355b0502 4047 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
ab51ec6b 4048 binder_inner_proc_unlock(proc);
355b0502
GKH
4049 kfree(death);
4050 binder_stats_deleted(BINDER_STAT_DEATH);
ed29721e 4051 } else {
72196393
TK
4052 binder_enqueue_work_ilocked(
4053 w, &proc->delivered_death);
ed29721e
TK
4054 binder_inner_proc_unlock(proc);
4055 }
ab51ec6b
MC
4056 if (put_user(cmd, (uint32_t __user *)ptr))
4057 return -EFAULT;
4058 ptr += sizeof(uint32_t);
4059 if (put_user(cookie,
4060 (binder_uintptr_t __user *)ptr))
4061 return -EFAULT;
4062 ptr += sizeof(binder_uintptr_t);
4063 binder_stat_br(proc, thread, cmd);
355b0502
GKH
4064 if (cmd == BR_DEAD_BINDER)
4065 goto done; /* DEAD_BINDER notifications can cause transactions */
4066 } break;
4067 }
4068
4069 if (!t)
4070 continue;
4071
4072 BUG_ON(t->buffer == NULL);
4073 if (t->buffer->target_node) {
4074 struct binder_node *target_node = t->buffer->target_node;
10f62861 4075
355b0502
GKH
4076 tr.target.ptr = target_node->ptr;
4077 tr.cookie = target_node->cookie;
4078 t->saved_priority = task_nice(current);
4079 if (t->priority < target_node->min_priority &&
4080 !(t->flags & TF_ONE_WAY))
4081 binder_set_nice(t->priority);
4082 else if (!(t->flags & TF_ONE_WAY) ||
4083 t->saved_priority > target_node->min_priority)
4084 binder_set_nice(target_node->min_priority);
4085 cmd = BR_TRANSACTION;
4086 } else {
da49889d
AH
4087 tr.target.ptr = 0;
4088 tr.cookie = 0;
355b0502
GKH
4089 cmd = BR_REPLY;
4090 }
4091 tr.code = t->code;
4092 tr.flags = t->flags;
4a2ebb93 4093 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 4094
7a4408c6
TK
4095 t_from = binder_get_txn_from(t);
4096 if (t_from) {
4097 struct task_struct *sender = t_from->proc->tsk;
10f62861 4098
355b0502 4099 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 4100 task_active_pid_ns(current));
355b0502
GKH
4101 } else {
4102 tr.sender_pid = 0;
4103 }
4104
4105 tr.data_size = t->buffer->data_size;
4106 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
4107 tr.data.ptr.buffer = (binder_uintptr_t)
4108 ((uintptr_t)t->buffer->data +
4109 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
4110 tr.data.ptr.offsets = tr.data.ptr.buffer +
4111 ALIGN(t->buffer->data_size,
4112 sizeof(void *));
4113
7a4408c6
TK
4114 if (put_user(cmd, (uint32_t __user *)ptr)) {
4115 if (t_from)
4116 binder_thread_dec_tmpref(t_from);
fb2c4452
MC
4117
4118 binder_cleanup_transaction(t, "put_user failed",
4119 BR_FAILED_REPLY);
4120
355b0502 4121 return -EFAULT;
7a4408c6 4122 }
355b0502 4123 ptr += sizeof(uint32_t);
7a4408c6
TK
4124 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4125 if (t_from)
4126 binder_thread_dec_tmpref(t_from);
fb2c4452
MC
4127
4128 binder_cleanup_transaction(t, "copy_to_user failed",
4129 BR_FAILED_REPLY);
4130
355b0502 4131 return -EFAULT;
7a4408c6 4132 }
355b0502
GKH
4133 ptr += sizeof(tr);
4134
975a1ac9 4135 trace_binder_transaction_received(t);
355b0502
GKH
4136 binder_stat_br(proc, thread, cmd);
4137 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 4138 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
4139 proc->pid, thread->pid,
4140 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4141 "BR_REPLY",
7a4408c6
TK
4142 t->debug_id, t_from ? t_from->proc->pid : 0,
4143 t_from ? t_from->pid : 0, cmd,
355b0502 4144 t->buffer->data_size, t->buffer->offsets_size,
da49889d 4145 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 4146
7a4408c6
TK
4147 if (t_from)
4148 binder_thread_dec_tmpref(t_from);
355b0502
GKH
4149 t->buffer->allow_user_free = 1;
4150 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
0b89d69a 4151 binder_inner_proc_lock(thread->proc);
355b0502
GKH
4152 t->to_parent = thread->transaction_stack;
4153 t->to_thread = thread;
4154 thread->transaction_stack = t;
0b89d69a 4155 binder_inner_proc_unlock(thread->proc);
355b0502 4156 } else {
b6d282ce 4157 binder_free_transaction(t);
355b0502
GKH
4158 }
4159 break;
4160 }
4161
4162done:
4163
4164 *consumed = ptr - buffer;
b3e68612 4165 binder_inner_proc_lock(proc);
1b77e9dc
MC
4166 if (proc->requested_threads == 0 &&
4167 list_empty(&thread->proc->waiting_threads) &&
355b0502
GKH
4168 proc->requested_threads_started < proc->max_threads &&
4169 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4170 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
4171 * spawn a new thread if we leave this out */) {
4172 proc->requested_threads++;
b3e68612 4173 binder_inner_proc_unlock(proc);
355b0502 4174 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 4175 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
4176 proc->pid, thread->pid);
4177 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4178 return -EFAULT;
89334ab4 4179 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
b3e68612
TK
4180 } else
4181 binder_inner_proc_unlock(proc);
355b0502
GKH
4182 return 0;
4183}
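
/*
 * Editor's sketch (not part of binder.c): draining the read buffer
 * filled by binder_thread_read() above. Each entry is a 32-bit BR_
 * code followed by a payload whose size depends on the code; only the
 * sizes needed to keep the cursor in step are decoded here.
 */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void walk_read_buffer(const uint8_t *buf, size_t consumed)
{
	size_t off = 0;

	while (off + sizeof(uint32_t) <= consumed) {
		uint32_t cmd;

		memcpy(&cmd, buf + off, sizeof(cmd));
		off += sizeof(cmd);
		switch (cmd) {
		case BR_NOOP:			/* no payload */
		case BR_TRANSACTION_COMPLETE:	/* no payload */
		case BR_SPAWN_LOOPER:		/* no payload */
			break;
		case BR_TRANSACTION:
		case BR_REPLY:
			/* payload: struct binder_transaction_data */
			off += sizeof(struct binder_transaction_data);
			break;
		default:			/* not decoded in this sketch */
			return;
		}
	}
}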
4184
72196393
TK
4185static void binder_release_work(struct binder_proc *proc,
4186 struct list_head *list)
355b0502
GKH
4187{
4188 struct binder_work *w;
10f62861 4189
72196393
TK
4190 while (1) {
4191 w = binder_dequeue_work_head(proc, list);
4192 if (!w)
4193 return;
4194
355b0502
GKH
4195 switch (w->type) {
4196 case BINDER_WORK_TRANSACTION: {
4197 struct binder_transaction *t;
4198
4199 t = container_of(w, struct binder_transaction, work);
fb2c4452
MC
4200
4201 binder_cleanup_transaction(t, "process died.",
4202 BR_DEAD_REPLY);
355b0502 4203 } break;
26549d17
TK
4204 case BINDER_WORK_RETURN_ERROR: {
4205 struct binder_error *e = container_of(
4206 w, struct binder_error, work);
4207
4208 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4209 "undelivered TRANSACTION_ERROR: %u\n",
4210 e->cmd);
4211 } break;
355b0502 4212 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 4213 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 4214 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
4215 kfree(w);
4216 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4217 } break;
675d66b0
AH
4218 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4219 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4220 struct binder_ref_death *death;
4221
4222 death = container_of(w, struct binder_ref_death, work);
4223 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
4224 "undelivered death notification, %016llx\n",
4225 (u64)death->cookie);
675d66b0
AH
4226 kfree(death);
4227 binder_stats_deleted(BINDER_STAT_DEATH);
4228 } break;
355b0502 4229 default:
56b468fc 4230 pr_err("unexpected work type, %d, not freed\n",
675d66b0 4231 w->type);
355b0502
GKH
4232 break;
4233 }
4234 }
4235
4236}
4237
7bd7b0e6
TK
4238static struct binder_thread *binder_get_thread_ilocked(
4239 struct binder_proc *proc, struct binder_thread *new_thread)
355b0502
GKH
4240{
4241 struct binder_thread *thread = NULL;
4242 struct rb_node *parent = NULL;
4243 struct rb_node **p = &proc->threads.rb_node;
4244
4245 while (*p) {
4246 parent = *p;
4247 thread = rb_entry(parent, struct binder_thread, rb_node);
4248
4249 if (current->pid < thread->pid)
4250 p = &(*p)->rb_left;
4251 else if (current->pid > thread->pid)
4252 p = &(*p)->rb_right;
4253 else
7bd7b0e6 4254 return thread;
355b0502 4255 }
7bd7b0e6
TK
4256 if (!new_thread)
4257 return NULL;
4258 thread = new_thread;
4259 binder_stats_created(BINDER_STAT_THREAD);
4260 thread->proc = proc;
4261 thread->pid = current->pid;
4262 atomic_set(&thread->tmp_ref, 0);
4263 init_waitqueue_head(&thread->wait);
4264 INIT_LIST_HEAD(&thread->todo);
4265 rb_link_node(&thread->rb_node, parent, p);
4266 rb_insert_color(&thread->rb_node, &proc->threads);
4267 thread->looper_need_return = true;
4268 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4269 thread->return_error.cmd = BR_OK;
4270 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4271 thread->reply_error.cmd = BR_OK;
1b77e9dc 4272 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
7bd7b0e6
TK
4273 return thread;
4274}
4275
4276static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4277{
4278 struct binder_thread *thread;
4279 struct binder_thread *new_thread;
4280
4281 binder_inner_proc_lock(proc);
4282 thread = binder_get_thread_ilocked(proc, NULL);
4283 binder_inner_proc_unlock(proc);
4284 if (!thread) {
4285 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4286 if (new_thread == NULL)
355b0502 4287 return NULL;
7bd7b0e6
TK
4288 binder_inner_proc_lock(proc);
4289 thread = binder_get_thread_ilocked(proc, new_thread);
4290 binder_inner_proc_unlock(proc);
4291 if (thread != new_thread)
4292 kfree(new_thread);
355b0502
GKH
4293 }
4294 return thread;
4295}
4296
7a4408c6
TK
4297static void binder_free_proc(struct binder_proc *proc)
4298{
4299 BUG_ON(!list_empty(&proc->todo));
4300 BUG_ON(!list_empty(&proc->delivered_death));
4301 binder_alloc_deferred_release(&proc->alloc);
4302 put_task_struct(proc->tsk);
4303 binder_stats_deleted(BINDER_STAT_PROC);
4304 kfree(proc);
4305}
4306
4307static void binder_free_thread(struct binder_thread *thread)
4308{
4309 BUG_ON(!list_empty(&thread->todo));
4310 binder_stats_deleted(BINDER_STAT_THREAD);
4311 binder_proc_dec_tmpref(thread->proc);
4312 kfree(thread);
4313}
4314
4315static int binder_thread_release(struct binder_proc *proc,
4316 struct binder_thread *thread)
355b0502
GKH
4317{
4318 struct binder_transaction *t;
4319 struct binder_transaction *send_reply = NULL;
4320 int active_transactions = 0;
7a4408c6 4321 struct binder_transaction *last_t = NULL;
355b0502 4322
7bd7b0e6 4323 binder_inner_proc_lock(thread->proc);
7a4408c6
TK
4324 /*
4325 * take a ref on the proc so it survives
4326 * after we remove this thread from proc->threads.
4327 * The corresponding dec is when we actually
4328 * free the thread in binder_free_thread()
4329 */
4330 proc->tmp_ref++;
4331 /*
4332 * take a ref on this thread to ensure it
4333 * survives while we are releasing it
4334 */
4335 atomic_inc(&thread->tmp_ref);
355b0502
GKH
4336 rb_erase(&thread->rb_node, &proc->threads);
4337 t = thread->transaction_stack;
7a4408c6
TK
4338 if (t) {
4339 spin_lock(&t->lock);
4340 if (t->to_thread == thread)
4341 send_reply = t;
4342 }
4343 thread->is_dead = true;
4344
355b0502 4345 while (t) {
7a4408c6 4346 last_t = t;
355b0502
GKH
4347 active_transactions++;
4348 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
4349 "release %d:%d transaction %d %s, still active\n",
4350 proc->pid, thread->pid,
355b0502
GKH
4351 t->debug_id,
4352 (t->to_thread == thread) ? "in" : "out");
4353
4354 if (t->to_thread == thread) {
4355 t->to_proc = NULL;
4356 t->to_thread = NULL;
4357 if (t->buffer) {
4358 t->buffer->transaction = NULL;
4359 t->buffer = NULL;
4360 }
4361 t = t->to_parent;
4362 } else if (t->from == thread) {
4363 t->from = NULL;
4364 t = t->from_parent;
4365 } else
4366 BUG();
7a4408c6
TK
4367 spin_unlock(&last_t->lock);
4368 if (t)
4369 spin_lock(&t->lock);
355b0502 4370 }
f5cb779b
MC
4371
4372 /*
4373 * If this thread used poll, make sure we remove the waitqueue
4374 * from any epoll data structures holding it with POLLFREE.
4375 * waitqueue_active() is safe to use here because we're holding
4376 * the inner lock.
4377 */
4378 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4379 waitqueue_active(&thread->wait)) {
a9a08845 4380 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
f5cb779b
MC
4381 }
4382
7bd7b0e6 4383 binder_inner_proc_unlock(thread->proc);
7a4408c6 4384
355b0502
GKH
4385 if (send_reply)
4386 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
72196393 4387 binder_release_work(proc, &thread->todo);
7a4408c6 4388 binder_thread_dec_tmpref(thread);
355b0502
GKH
4389 return active_transactions;
4390}
4391
afc9a42b 4392static __poll_t binder_poll(struct file *filp,
355b0502
GKH
4393 struct poll_table_struct *wait)
4394{
4395 struct binder_proc *proc = filp->private_data;
4396 struct binder_thread *thread = NULL;
1b77e9dc 4397 bool wait_for_proc_work;
355b0502 4398
355b0502 4399 thread = binder_get_thread(proc);
f8898267
EB
4400 if (!thread)
4401 return POLLERR;
355b0502 4402
0b89d69a 4403 binder_inner_proc_lock(thread->proc);
1b77e9dc
MC
4404 thread->looper |= BINDER_LOOPER_STATE_POLL;
4405 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4406
0b89d69a 4407 binder_inner_proc_unlock(thread->proc);
975a1ac9 4408
1b77e9dc
MC
4409 poll_wait(filp, &thread->wait, wait);
4410
66b83a4c 4411 if (binder_has_work(thread, wait_for_proc_work))
a9a08845 4412 return EPOLLIN;
1b77e9dc 4413
355b0502
GKH
4414 return 0;
4415}
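
/*
 * Editor's sketch (not part of binder.c): user-space use of the poll
 * hook above. EPOLLIN means binder_thread_read() would find work, so
 * the thread should drain it with a BINDER_WRITE_READ read.
 */
#include <poll.h>

static int wait_for_binder_work(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* > 0: work available */
}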
4416
78260ac6
TR
4417static int binder_ioctl_write_read(struct file *filp,
4418 unsigned int cmd, unsigned long arg,
4419 struct binder_thread *thread)
4420{
4421 int ret = 0;
4422 struct binder_proc *proc = filp->private_data;
4423 unsigned int size = _IOC_SIZE(cmd);
4424 void __user *ubuf = (void __user *)arg;
4425 struct binder_write_read bwr;
4426
4427 if (size != sizeof(struct binder_write_read)) {
4428 ret = -EINVAL;
4429 goto out;
4430 }
4431 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4432 ret = -EFAULT;
4433 goto out;
4434 }
4435 binder_debug(BINDER_DEBUG_READ_WRITE,
4436 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4437 proc->pid, thread->pid,
4438 (u64)bwr.write_size, (u64)bwr.write_buffer,
4439 (u64)bwr.read_size, (u64)bwr.read_buffer);
4440
4441 if (bwr.write_size > 0) {
4442 ret = binder_thread_write(proc, thread,
4443 bwr.write_buffer,
4444 bwr.write_size,
4445 &bwr.write_consumed);
4446 trace_binder_write_done(ret);
4447 if (ret < 0) {
4448 bwr.read_consumed = 0;
4449 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4450 ret = -EFAULT;
4451 goto out;
4452 }
4453 }
4454 if (bwr.read_size > 0) {
4455 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4456 bwr.read_size,
4457 &bwr.read_consumed,
4458 filp->f_flags & O_NONBLOCK);
4459 trace_binder_read_done(ret);
1b77e9dc
MC
4460 binder_inner_proc_lock(proc);
4461 if (!binder_worklist_empty_ilocked(&proc->todo))
408c68b1 4462 binder_wakeup_proc_ilocked(proc);
1b77e9dc 4463 binder_inner_proc_unlock(proc);
78260ac6
TR
4464 if (ret < 0) {
4465 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4466 ret = -EFAULT;
4467 goto out;
4468 }
4469 }
4470 binder_debug(BINDER_DEBUG_READ_WRITE,
4471 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4472 proc->pid, thread->pid,
4473 (u64)bwr.write_consumed, (u64)bwr.write_size,
4474 (u64)bwr.read_consumed, (u64)bwr.read_size);
4475 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4476 ret = -EFAULT;
4477 goto out;
4478 }
4479out:
4480 return ret;
4481}
4482
4483static int binder_ioctl_set_ctx_mgr(struct file *filp)
4484{
4485 int ret = 0;
4486 struct binder_proc *proc = filp->private_data;
342e5c90 4487 struct binder_context *context = proc->context;
c44b1231 4488 struct binder_node *new_node;
78260ac6
TR
4489 kuid_t curr_euid = current_euid();
4490
c44b1231 4491 mutex_lock(&context->context_mgr_node_lock);
342e5c90 4492 if (context->binder_context_mgr_node) {
78260ac6
TR
4493 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4494 ret = -EBUSY;
4495 goto out;
4496 }
79af7307
SS
4497 ret = security_binder_set_context_mgr(proc->tsk);
4498 if (ret < 0)
4499 goto out;
342e5c90
MC
4500 if (uid_valid(context->binder_context_mgr_uid)) {
4501 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
4502 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4503 from_kuid(&init_user_ns, curr_euid),
4504 from_kuid(&init_user_ns,
342e5c90 4505 context->binder_context_mgr_uid));
78260ac6
TR
4506 ret = -EPERM;
4507 goto out;
4508 }
4509 } else {
342e5c90 4510 context->binder_context_mgr_uid = curr_euid;
78260ac6 4511 }
673068ee 4512 new_node = binder_new_node(proc, NULL);
c44b1231 4513 if (!new_node) {
78260ac6
TR
4514 ret = -ENOMEM;
4515 goto out;
4516 }
673068ee 4517 binder_node_lock(new_node);
c44b1231
TK
4518 new_node->local_weak_refs++;
4519 new_node->local_strong_refs++;
4520 new_node->has_strong_ref = 1;
4521 new_node->has_weak_ref = 1;
4522 context->binder_context_mgr_node = new_node;
673068ee 4523 binder_node_unlock(new_node);
adc18842 4524 binder_put_node(new_node);
78260ac6 4525out:
c44b1231 4526 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
4527 return ret;
4528}
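
/*
 * Editor's sketch (not part of binder.c): in practice only
 * servicemanager reaches this path, claiming context-manager status
 * right after opening the device; the ioctl argument is unused.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int become_context_manager(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
		return -1;	/* fd intentionally leaked in this sketch */
	return fd;
}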
4529
abcc6153
CC
4530static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4531 struct binder_node_debug_info *info)
4532{
4533 struct rb_node *n;
4534 binder_uintptr_t ptr = info->ptr;
4535
4536 memset(info, 0, sizeof(*info));
4537
4538 binder_inner_proc_lock(proc);
4539 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4540 struct binder_node *node = rb_entry(n, struct binder_node,
4541 rb_node);
4542 if (node->ptr > ptr) {
4543 info->ptr = node->ptr;
4544 info->cookie = node->cookie;
4545 info->has_strong_ref = node->has_strong_ref;
4546 info->has_weak_ref = node->has_weak_ref;
4547 break;
4548 }
4549 }
4550 binder_inner_proc_unlock(proc);
4551
4552 return 0;
4553}
4554
355b0502
GKH
4555static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4556{
4557 int ret;
4558 struct binder_proc *proc = filp->private_data;
4559 struct binder_thread *thread;
4560 unsigned int size = _IOC_SIZE(cmd);
4561 void __user *ubuf = (void __user *)arg;
4562
78260ac6
TR
4563 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4564 proc->pid, current->pid, cmd, arg);*/
355b0502 4565
4175e2b4
SY
4566 binder_selftest_alloc(&proc->alloc);
4567
975a1ac9
AH
4568 trace_binder_ioctl(cmd, arg);
4569
355b0502
GKH
4570 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4571 if (ret)
975a1ac9 4572 goto err_unlocked;
355b0502 4573
355b0502
GKH
4574 thread = binder_get_thread(proc);
4575 if (thread == NULL) {
4576 ret = -ENOMEM;
4577 goto err;
4578 }
4579
4580 switch (cmd) {
78260ac6
TR
4581 case BINDER_WRITE_READ:
4582 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4583 if (ret)
355b0502 4584 goto err;
355b0502 4585 break;
b3e68612
TK
4586 case BINDER_SET_MAX_THREADS: {
4587 int max_threads;
4588
4589 if (copy_from_user(&max_threads, ubuf,
4590 sizeof(max_threads))) {
355b0502
GKH
4591 ret = -EINVAL;
4592 goto err;
4593 }
b3e68612
TK
4594 binder_inner_proc_lock(proc);
4595 proc->max_threads = max_threads;
4596 binder_inner_proc_unlock(proc);
355b0502 4597 break;
b3e68612 4598 }
355b0502 4599 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
4600 ret = binder_ioctl_set_ctx_mgr(filp);
4601 if (ret)
355b0502 4602 goto err;
355b0502
GKH
4603 break;
4604 case BINDER_THREAD_EXIT:
56b468fc 4605 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 4606 proc->pid, thread->pid);
7a4408c6 4607 binder_thread_release(proc, thread);
355b0502
GKH
4608 thread = NULL;
4609 break;
36c89c0a
MM
4610 case BINDER_VERSION: {
4611 struct binder_version __user *ver = ubuf;
4612
355b0502
GKH
4613 if (size != sizeof(struct binder_version)) {
4614 ret = -EINVAL;
4615 goto err;
4616 }
36c89c0a
MM
4617 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4618 &ver->protocol_version)) {
355b0502
GKH
4619 ret = -EINVAL;
4620 goto err;
4621 }
4622 break;
36c89c0a 4623 }
abcc6153
CC
4624 case BINDER_GET_NODE_DEBUG_INFO: {
4625 struct binder_node_debug_info info;
4626
4627 if (copy_from_user(&info, ubuf, sizeof(info))) {
4628 ret = -EFAULT;
4629 goto err;
4630 }
4631
4632 ret = binder_ioctl_get_node_debug_info(proc, &info);
4633 if (ret < 0)
4634 goto err;
4635
4636 if (copy_to_user(ubuf, &info, sizeof(info))) {
4637 ret = -EFAULT;
4638 goto err;
4639 }
4640 break;
4641 }
355b0502
GKH
4642 default:
4643 ret = -EINVAL;
4644 goto err;
4645 }
4646 ret = 0;
4647err:
4648 if (thread)
08dabcee 4649 thread->looper_need_return = false;
355b0502
GKH
4650 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4651 if (ret && ret != -ERESTARTSYS)
56b468fc 4652 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
4653err_unlocked:
4654 trace_binder_ioctl_done(ret);
355b0502
GKH
4655 return ret;
4656}
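
/*
 * Editor's sketch (not part of binder.c): the BINDER_VERSION handshake
 * handled in binder_ioctl() above, which user space performs once at
 * open time to reject protocol mismatches.
 */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int check_binder_version(int fd)
{
	struct binder_version vers;

	if (ioctl(fd, BINDER_VERSION, &vers) < 0)
		return -1;
	return vers.protocol_version == BINDER_CURRENT_PROTOCOL_VERSION ?
		0 : -1;
}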
4657
4658static void binder_vma_open(struct vm_area_struct *vma)
4659{
4660 struct binder_proc *proc = vma->vm_private_data;
10f62861 4661
355b0502 4662 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4663 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4664 proc->pid, vma->vm_start, vma->vm_end,
4665 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4666 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
4667}
4668
4669static void binder_vma_close(struct vm_area_struct *vma)
4670{
4671 struct binder_proc *proc = vma->vm_private_data;
10f62861 4672
355b0502 4673 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4674 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4675 proc->pid, vma->vm_start, vma->vm_end,
4676 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4677 (unsigned long)pgprot_val(vma->vm_page_prot));
19c98724 4678 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
4679 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4680}
4681
11bac800 4682static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
4683{
4684 return VM_FAULT_SIGBUS;
4685}
4686
7cbea8dc 4687static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
4688 .open = binder_vma_open,
4689 .close = binder_vma_close,
ddac7d5f 4690 .fault = binder_vm_fault,
355b0502
GKH
4691};
4692
19c98724
TK
4693static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4694{
4695 int ret;
4696 struct binder_proc *proc = filp->private_data;
4697 const char *failure_string;
4698
4699 if (proc->tsk != current->group_leader)
4700 return -EINVAL;
4701
4702 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4703 vma->vm_end = vma->vm_start + SZ_4M;
4704
4705 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4706 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4707 __func__, proc->pid, vma->vm_start, vma->vm_end,
4708 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4709 (unsigned long)pgprot_val(vma->vm_page_prot));
4710
4711 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4712 ret = -EPERM;
4713 failure_string = "bad vm_flags";
4714 goto err_bad_arg;
4715 }
4716 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4717 vma->vm_ops = &binder_vm_ops;
4718 vma->vm_private_data = proc;
4719
4720 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4721 if (ret)
4722 return ret;
7f3dc008 4723 mutex_lock(&proc->files_lock);
19c98724 4724 proc->files = get_files_struct(current);
7f3dc008 4725 mutex_unlock(&proc->files_lock);
19c98724
TK
4726 return 0;
4727
355b0502 4728err_bad_arg:
00c41cdd 4729 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
355b0502
GKH
4730 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4731 return ret;
4732}
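
/*
 * Editor's sketch (not part of binder.c): the user-space half of the
 * mapping binder_mmap() above validates. The mapping must not be
 * writable (FORBIDDEN_MMAP_FLAGS) and is capped at 4MB by the kernel;
 * the 1MB suggestion below is an arbitrary example.
 */
#include <fcntl.h>
#include <sys/mman.h>

static void *map_binder_buffers(int fd, size_t map_size /* e.g. 1 << 20 */)
{
	void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);

	return map == MAP_FAILED ? NULL : map;
}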
4733
4734static int binder_open(struct inode *nodp, struct file *filp)
4735{
4736 struct binder_proc *proc;
ac4812c5 4737 struct binder_device *binder_dev;
355b0502 4738
00c41cdd 4739 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
355b0502
GKH
4740 current->group_leader->pid, current->pid);
4741
4742 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4743 if (proc == NULL)
4744 return -ENOMEM;
9630fe88
TK
4745 spin_lock_init(&proc->inner_lock);
4746 spin_lock_init(&proc->outer_lock);
c4ea41ba
TK
4747 get_task_struct(current->group_leader);
4748 proc->tsk = current->group_leader;
7f3dc008 4749 mutex_init(&proc->files_lock);
355b0502 4750 INIT_LIST_HEAD(&proc->todo);
355b0502 4751 proc->default_priority = task_nice(current);
ac4812c5
MC
4752 binder_dev = container_of(filp->private_data, struct binder_device,
4753 miscdev);
4754 proc->context = &binder_dev->context;
19c98724 4755 binder_alloc_init(&proc->alloc);
975a1ac9 4756
355b0502 4757 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
4758 proc->pid = current->group_leader->pid;
4759 INIT_LIST_HEAD(&proc->delivered_death);
1b77e9dc 4760 INIT_LIST_HEAD(&proc->waiting_threads);
355b0502 4761 filp->private_data = proc;
975a1ac9 4762
c44b1231
TK
4763 mutex_lock(&binder_procs_lock);
4764 hlist_add_head(&proc->proc_node, &binder_procs);
4765 mutex_unlock(&binder_procs_lock);
4766
16b66554 4767 if (binder_debugfs_dir_entry_proc) {
355b0502 4768 char strbuf[11];
10f62861 4769
355b0502 4770 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
14db3181
MC
4771 /*
4772 * proc debug entries are shared between contexts, so
4773 * this will fail if the process tries to open the driver
4774 * again with a different context. The printing code will
4775 * print all contexts that a given PID has anyway, so this
4776 * is not a problem.
4777 */
21d02ddf 4778 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
14db3181
MC
4779 binder_debugfs_dir_entry_proc,
4780 (void *)(unsigned long)proc->pid,
4781 &binder_proc_fops);
355b0502
GKH
4782 }
4783
4784 return 0;
4785}
4786
4787static int binder_flush(struct file *filp, fl_owner_t id)
4788{
4789 struct binder_proc *proc = filp->private_data;
4790
4791 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4792
4793 return 0;
4794}
4795
4796static void binder_deferred_flush(struct binder_proc *proc)
4797{
4798 struct rb_node *n;
4799 int wake_count = 0;
10f62861 4800
7bd7b0e6 4801 binder_inner_proc_lock(proc);
355b0502
GKH
4802 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4803 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 4804
08dabcee 4805 thread->looper_need_return = true;
355b0502
GKH
4806 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4807 wake_up_interruptible(&thread->wait);
4808 wake_count++;
4809 }
4810 }
7bd7b0e6 4811 binder_inner_proc_unlock(proc);
355b0502
GKH
4812
4813 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4814 "binder_flush: %d woke %d threads\n", proc->pid,
4815 wake_count);
4816}
4817
4818static int binder_release(struct inode *nodp, struct file *filp)
4819{
4820 struct binder_proc *proc = filp->private_data;
10f62861 4821
16b66554 4822 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
4823 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4824
4825 return 0;
4826}
4827
008fa749
ME
4828static int binder_node_release(struct binder_node *node, int refs)
4829{
4830 struct binder_ref *ref;
4831 int death = 0;
ed29721e 4832 struct binder_proc *proc = node->proc;
008fa749 4833
72196393 4834 binder_release_work(proc, &node->async_todo);
ed29721e 4835
673068ee 4836 binder_node_lock(node);
ed29721e 4837 binder_inner_proc_lock(proc);
72196393 4838 binder_dequeue_work_ilocked(&node->work);
adc18842
TK
4839 /*
4840 * The caller must have taken a temporary ref on the node,
4841 */
4842 BUG_ON(!node->tmp_refs);
4843 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
ed29721e 4844 binder_inner_proc_unlock(proc);
673068ee 4845 binder_node_unlock(node);
ed29721e 4846 binder_free_node(node);
008fa749
ME
4847
4848 return refs;
4849 }
4850
4851 node->proc = NULL;
4852 node->local_strong_refs = 0;
4853 node->local_weak_refs = 0;
ed29721e 4854 binder_inner_proc_unlock(proc);
c44b1231
TK
4855
4856 spin_lock(&binder_dead_nodes_lock);
008fa749 4857 hlist_add_head(&node->dead_node, &binder_dead_nodes);
c44b1231 4858 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
4859
4860 hlist_for_each_entry(ref, &node->refs, node_entry) {
4861 refs++;
ab51ec6b
MC
4862 /*
4863 * Need the node lock to synchronize
4864 * with new notification requests and the
4865 * inner lock to synchronize with queued
4866 * death notifications.
4867 */
4868 binder_inner_proc_lock(ref->proc);
4869 if (!ref->death) {
4870 binder_inner_proc_unlock(ref->proc);
e194fd8a 4871 continue;
ab51ec6b 4872 }
008fa749
ME
4873
4874 death++;
4875
ab51ec6b
MC
4876 BUG_ON(!list_empty(&ref->death->work.entry));
4877 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4878 binder_enqueue_work_ilocked(&ref->death->work,
4879 &ref->proc->todo);
408c68b1 4880 binder_wakeup_proc_ilocked(ref->proc);
72196393 4881 binder_inner_proc_unlock(ref->proc);
008fa749
ME
4882 }
4883
008fa749
ME
4884 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4885 "node %d now dead, refs %d, death %d\n",
4886 node->debug_id, refs, death);
673068ee 4887 binder_node_unlock(node);
adc18842 4888 binder_put_node(node);
008fa749
ME
4889
4890 return refs;
4891}
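
/*
 * Editor's sketch (not part of binder.c): the user-space side of the
 * death-notification flow that ends in binder_node_release() above.
 * The cookie is opaque to the kernel and echoed back in BR_DEAD_BINDER;
 * the handle value is hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int request_death_notification(int fd, uint32_t handle,
				      binder_uintptr_t cookie)
{
	uint8_t wbuf[2 * sizeof(uint32_t) + sizeof(binder_uintptr_t)];
	uint32_t cmd = BC_REQUEST_DEATH_NOTIFICATION;
	struct binder_write_read bwr;
	size_t off = 0;

	memcpy(wbuf + off, &cmd, sizeof(cmd));
	off += sizeof(cmd);
	memcpy(wbuf + off, &handle, sizeof(handle));
	off += sizeof(handle);
	memcpy(wbuf + off, &cookie, sizeof(cookie));
	off += sizeof(cookie);

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	bwr.write_size = off;
	/*
	 * When the node's owner later dies, a read returns BR_DEAD_BINDER
	 * plus this cookie; user space must answer with BC_DEAD_BINDER_DONE
	 * plus the same cookie so the kernel can drop the binder_ref_death
	 * queued on proc->delivered_death.
	 */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}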
4892
355b0502
GKH
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

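/**
 * binder_deferred_func() - workqueue handler for deferred binder work
 * @work:	embedded work struct (binder_deferred_work)
 *
 * Repeatedly pops a proc from binder_deferred_list and performs its
 * pending PUT_FILES, FLUSH, and RELEASE work, dropping
 * binder_deferred_lock before doing the actual work.
 */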
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

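/**
 * binder_defer_work() - request deferred work for a process
 * @proc:	process the work applies to
 * @defer:	BINDER_DEFERRED_* flag(s) to add
 *
 * ORs @defer into the proc's pending mask and schedules
 * binder_deferred_work unless @proc is already queued.
 */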
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

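/**
 * print_binder_transaction_ilocked() - dump one transaction
 * @m:		seq_file to print to
 * @proc:	proc whose inner lock the caller holds
 * @prefix:	string prepended to the output line
 * @t:		transaction to print
 *
 * The transaction buffer is dereferenced only when @proc is the
 * transaction's target, since only that proc's inner lock protects it.
 */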
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

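/**
 * print_binder_node_nilocked() - dump one node and its references
 * @m:		seq_file to print to
 * @node:	node to print
 *
 * Caller must hold the node lock and, if the node still has an owning
 * process, that proc's inner lock (needed to walk node->async_todo).
 */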
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

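/**
 * print_binder_stats() - dump BC/BR counters and object lifetimes
 * @m:		seq_file to print to
 * @prefix:	indentation prefix for each line
 * @stats:	counters to print
 *
 * The BUILD_BUG_ON()s keep the string tables above in sync with the
 * counter arrays in struct binder_stats.
 */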
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}


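/**
 * binder_state_show() - seq_file backend for the debugfs "state" file
 * @m:		seq_file to print to
 * @unused:	unused
 *
 * Prints all dead nodes, taking a temporary reference on each so it
 * survives while being printed, then the full state of every
 * registered binder_proc.
 */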
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

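/**
 * init_binder_device() - register one binder misc device
 * @name:	name of the device node to create
 *
 * Allocates a binder_device, registers it as a dynamic misc device
 * backed by binder_fops, and adds it to binder_devices.
 *
 * Return: 0 on success, -ENOMEM or the misc_register() error on failure.
 */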
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

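/**
 * binder_init() - driver initialization
 *
 * Initializes the shrinker and transaction logs, creates the debugfs
 * hierarchy, and registers one misc device for each comma-separated
 * name in the binder_devices_param module parameter.
 */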
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");