/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate the
 * required lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
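/*
 * Illustrative sketch, not part of the original source: a caller that
 * needs all three locks must take them in the documented order, which
 * is what the helpers defined later in this file do internally:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */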

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
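/*
 * Illustrative note, not part of the original source: with name = proc,
 * BINDER_DEBUG_ENTRY() above expands to a binder_proc_open() wrapper and
 * a binder_proc_fops file_operations table, so a debugfs file registered
 * with &binder_proc_fops renders binder_proc_show() output through the
 * seq_file single_open()/single_release() helpers on each read.
 */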

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
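/*
 * Illustrative note, an assumption not spelled out in this file: the
 * module_param*() declarations above surface under sysfs once the
 * driver is loaded, writable per the S_IWUSR mode, e.g.:
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 *	echo 1    > /sys/module/binder/parameters/stop_on_user_error
 */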

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
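/*
 * Illustrative usage sketch, not part of the original source:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exited\n", proc->pid, thread->pid);
 *	binder_user_error("%d: bad handle\n", proc->pid);
 *
 * Besides logging, binder_user_error() raises binder_stop_on_user_error
 * to 2 when it is non-zero; binder_set_stop_on_user_error() above wakes
 * binder_user_error_wait waiters once the value is written back below 2.
 */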

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
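/*
 * Illustrative sketch, not part of the original source: the br[]/bc[]
 * counters are indexed by the ioctl command number, so counting a
 * delivered BR_TRANSACTION would look like:
 *
 *	atomic_inc(&binder_stats.br[_IOC_NR(BR_TRANSACTION)]);
 *
 * Sizing the arrays with the last command in each series
 * (BR_FAILED_REPLY / BC_REPLY_SG) keeps every valid command in range.
 */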

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
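/*
 * Illustrative sketch, an assumption about the reader side that is not
 * shown in this excerpt: a consumer of the log pairs with the smp_wmb()
 * above by reading debug_id_done first, e.g.:
 *
 *	int debug_id_done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// e's remaining fields are only stable if debug_id_done != 0
 */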

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type:  type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc:     unique userspace handle for ref
 * @strong:   strong ref count (debugging only if not locked)
 * @weak:     weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref_olocked(),
 *                a non-NULL @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:        struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work:   struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work:   struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work:   struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
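/*
 * Illustrative usage sketch, not part of the original source; it mirrors
 * the kfree-on-race pattern binder_new_node() uses above:
 *
 *	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	binder_proc_unlock(proc);
 *	if (ref != new_ref)
 *		// another thread initialized the ref first; free ours
 *		kfree(new_ref);
 */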
1570
2c1838dc 1571static void binder_cleanup_ref_olocked(struct binder_ref *ref)
355b0502 1572{
ed29721e 1573 bool delete_node = false;
ed29721e 1574
355b0502 1575 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 1576 "%d delete ref %d desc %d for node %d\n",
372e3147 1577 ref->proc->pid, ref->data.debug_id, ref->data.desc,
56b468fc 1578 ref->node->debug_id);
355b0502
GKH
1579
1580 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1581 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
372e3147 1582
673068ee 1583 binder_node_inner_lock(ref->node);
372e3147 1584 if (ref->data.strong)
673068ee 1585 binder_dec_node_nilocked(ref->node, 1, 1);
372e3147 1586
355b0502 1587 hlist_del(&ref->node_entry);
673068ee
TK
1588 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1589 binder_node_inner_unlock(ref->node);
ed29721e
TK
1590 /*
1591 * Clear ref->node unless we want the caller to free the node
1592 */
1593 if (!delete_node) {
1594 /*
1595 * The caller uses ref->node to determine
1596 * whether the node needs to be freed. Clear
1597 * it since the node is still alive.
1598 */
1599 ref->node = NULL;
1600 }
372e3147 1601
355b0502
GKH
1602 if (ref->death) {
1603 binder_debug(BINDER_DEBUG_DEAD_BINDER,
56b468fc 1604 "%d delete ref %d desc %d has death notification\n",
372e3147
TK
1605 ref->proc->pid, ref->data.debug_id,
1606 ref->data.desc);
72196393 1607 binder_dequeue_work(ref->proc, &ref->death->work);
355b0502
GKH
1608 binder_stats_deleted(BINDER_STAT_DEATH);
1609 }
355b0502
GKH
1610 binder_stats_deleted(BINDER_STAT_REF);
1611}
1612
372e3147 1613/**
2c1838dc 1614 * binder_inc_ref_olocked() - increment the ref for given handle
372e3147
TK
1615 * @ref: ref to be incremented
1616 * @strong: if true, strong increment, else weak
1617 * @target_list: list to queue node work on
1618 *
2c1838dc 1619 * Increment the ref. @ref->proc->outer_lock must be held on entry
372e3147
TK
1620 *
1621 * Return: 0, if successful, else errno
1622 */
2c1838dc
TK
1623static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1624 struct list_head *target_list)
355b0502
GKH
1625{
1626 int ret;
10f62861 1627
355b0502 1628 if (strong) {
372e3147 1629 if (ref->data.strong == 0) {
355b0502
GKH
1630 ret = binder_inc_node(ref->node, 1, 1, target_list);
1631 if (ret)
1632 return ret;
1633 }
372e3147 1634 ref->data.strong++;
355b0502 1635 } else {
372e3147 1636 if (ref->data.weak == 0) {
355b0502
GKH
1637 ret = binder_inc_node(ref->node, 0, 1, target_list);
1638 if (ret)
1639 return ret;
1640 }
372e3147 1641 ref->data.weak++;
355b0502
GKH
1642 }
1643 return 0;
1644}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
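
/*
 * Illustrative sketch (not part of the driver): the tmpref pattern the
 * comment above describes. A node returned by binder_get_node_from_ref()
 * carries an implicit tmpref that the caller must drop with
 * binder_put_node() when done. binder_example_peek_node() is a
 * hypothetical name used only for this example.
 */
static inline int binder_example_peek_node(struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	struct binder_node *node;

	node = binder_get_node_from_ref(proc, desc, true, &rdata);
	if (!node)
		return -EINVAL;
	/* ... safe to inspect node here; it cannot be freed ... */
	binder_put_node(node);	/* drop the implicit tmpref */
	return 0;
}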

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
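
/*
 * A strong/weak *increment* by handle would be the mirror image of
 * binder_dec_ref_for_handle() above; sketched here only for symmetry.
 * binder_example_inc_ref_for_handle() is a hypothetical name -- the
 * driver itself increments through binder_inc_ref_for_node() below.
 */
static inline int binder_example_inc_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, true, strong, rdata);
}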


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
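
/*
 * Illustrative sketch (not part of the driver): the acquire/release
 * pairing required by binder_get_txn_from_and_acq_inner(). On success
 * the caller holds from->proc->inner_lock and a tmp_ref on the thread,
 * and must drop both. binder_example_use_txn_from() is a hypothetical
 * name used only for this example.
 */
static inline void binder_example_use_txn_from(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from)
		return;
	/* ... operate on from while it cannot be released ... */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}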

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
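
/*
 * Illustrative sketch (not part of the driver): flushing one undelivered
 * work item with binder_cleanup_transaction(), the way the teardown paths
 * later in this file do. For a synchronous transaction this becomes a
 * failed reply to the sender; a one-way transaction is simply freed.
 * binder_example_flush_txn() is a hypothetical name used only here.
 */
static inline void binder_example_flush_txn(struct binder_transaction *t)
{
	binder_cleanup_transaction(t, "process died.", BR_DEAD_REPLY);
}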

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *	   size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
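
/*
 * Illustrative sketch (not part of the driver): walking a buffer's offset
 * array with binder_validate_object(), as the release and transaction
 * paths below do. Returns false at the first invalid object.
 * binder_example_check_objects() is a hypothetical name used only here.
 */
static inline bool binder_example_check_objects(struct binder_buffer *buffer)
{
	binder_size_t *offp, *off_start, *off_end;

	/* offsets follow the data area, pointer-aligned */
	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++)
		if (binder_validate_object(buffer, *offp) == 0)
			return false;
	return true;
}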

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
						binder_size_t index,
						binder_size_t *start,
						binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @node->proc then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
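
/*
 * Illustrative sketch (not part of the driver): the ref pairing the
 * transaction path below performs around binder_get_node_refs_for_txn().
 * Each ref taken above must be dropped once the transaction is queued
 * (or on the error paths). binder_example_node_refs_pairing() is a
 * hypothetical name used only for this example.
 */
static inline void binder_example_node_refs_pairing(struct binder_node *node)
{
	struct binder_proc *target_proc = NULL;
	struct binder_node *target_node;
	uint32_t error = BR_OK;

	target_node = binder_get_node_refs_for_txn(node, &target_proc, &error);
	if (!target_node)
		return;		/* error is now BR_DEAD_REPLY */
	/* ... construct and queue the transaction ... */
	binder_dec_node(target_node, 1, 0);	/* undo inc_node_nilocked */
	binder_dec_node_tmpref(target_node);	/* undo inc_node_tmpref */
	binder_proc_dec_tmpref(target_proc);	/* undo proc->tmp_ref++ */
}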

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
3286
fb07ebc3
BP
3287static int binder_thread_write(struct binder_proc *proc,
3288 struct binder_thread *thread,
da49889d
AH
3289 binder_uintptr_t binder_buffer, size_t size,
3290 binder_size_t *consumed)
355b0502
GKH
3291{
3292 uint32_t cmd;
342e5c90 3293 struct binder_context *context = proc->context;
da49889d 3294 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3295 void __user *ptr = buffer + *consumed;
3296 void __user *end = buffer + size;
3297
26549d17 3298 while (ptr < end && thread->return_error.cmd == BR_OK) {
372e3147
TK
3299 int ret;
3300
355b0502
GKH
3301 if (get_user(cmd, (uint32_t __user *)ptr))
3302 return -EFAULT;
3303 ptr += sizeof(uint32_t);
975a1ac9 3304 trace_binder_command(cmd);
355b0502 3305 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
0953c797
BJS
3306 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3307 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3308 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
355b0502
GKH
3309 }
3310 switch (cmd) {
3311 case BC_INCREFS:
3312 case BC_ACQUIRE:
3313 case BC_RELEASE:
3314 case BC_DECREFS: {
3315 uint32_t target;
355b0502 3316 const char *debug_string;
372e3147
TK
3317 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3318 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3319 struct binder_ref_data rdata;
355b0502
GKH
3320
3321 if (get_user(target, (uint32_t __user *)ptr))
3322 return -EFAULT;
c44b1231 3323
355b0502 3324 ptr += sizeof(uint32_t);
372e3147
TK
3325 ret = -1;
3326 if (increment && !target) {
c44b1231 3327 struct binder_node *ctx_mgr_node;
c44b1231
TK
3328 mutex_lock(&context->context_mgr_node_lock);
3329 ctx_mgr_node = context->binder_context_mgr_node;
372e3147
TK
3330 if (ctx_mgr_node)
3331 ret = binder_inc_ref_for_node(
3332 proc, ctx_mgr_node,
3333 strong, NULL, &rdata);
c44b1231
TK
3334 mutex_unlock(&context->context_mgr_node_lock);
3335 }
372e3147
TK
3336 if (ret)
3337 ret = binder_update_ref_for_handle(
3338 proc, target, increment, strong,
3339 &rdata);
3340 if (!ret && rdata.desc != target) {
3341 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3342 proc->pid, thread->pid,
3343 target, rdata.desc);
355b0502
GKH
3344 }
3345 switch (cmd) {
3346 case BC_INCREFS:
3347 debug_string = "IncRefs";
355b0502
GKH
3348 break;
3349 case BC_ACQUIRE:
3350 debug_string = "Acquire";
355b0502
GKH
3351 break;
3352 case BC_RELEASE:
3353 debug_string = "Release";
355b0502
GKH
3354 break;
3355 case BC_DECREFS:
3356 default:
3357 debug_string = "DecRefs";
372e3147
TK
3358 break;
3359 }
3360 if (ret) {
3361 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3362 proc->pid, thread->pid, debug_string,
3363 strong, target, ret);
355b0502
GKH
3364 break;
3365 }
3366 binder_debug(BINDER_DEBUG_USER_REFS,
372e3147
TK
3367 "%d:%d %s ref %d desc %d s %d w %d\n",
3368 proc->pid, thread->pid, debug_string,
3369 rdata.debug_id, rdata.desc, rdata.strong,
3370 rdata.weak);
355b0502
GKH
3371 break;
3372 }
3373 case BC_INCREFS_DONE:
3374 case BC_ACQUIRE_DONE: {
da49889d
AH
3375 binder_uintptr_t node_ptr;
3376 binder_uintptr_t cookie;
355b0502 3377 struct binder_node *node;
673068ee 3378 bool free_node;
355b0502 3379
da49889d 3380 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 3381 return -EFAULT;
da49889d
AH
3382 ptr += sizeof(binder_uintptr_t);
3383 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 3384 return -EFAULT;
da49889d 3385 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
3386 node = binder_get_node(proc, node_ptr);
3387 if (node == NULL) {
da49889d 3388 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
3389 proc->pid, thread->pid,
3390 cmd == BC_INCREFS_DONE ?
3391 "BC_INCREFS_DONE" :
3392 "BC_ACQUIRE_DONE",
da49889d 3393 (u64)node_ptr);
355b0502
GKH
3394 break;
3395 }
3396 if (cookie != node->cookie) {
da49889d 3397 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
3398 proc->pid, thread->pid,
3399 cmd == BC_INCREFS_DONE ?
3400 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
3401 (u64)node_ptr, node->debug_id,
3402 (u64)cookie, (u64)node->cookie);
adc18842 3403 binder_put_node(node);
355b0502
GKH
3404 break;
3405 }
673068ee 3406 binder_node_inner_lock(node);
355b0502
GKH
3407 if (cmd == BC_ACQUIRE_DONE) {
3408 if (node->pending_strong_ref == 0) {
56b468fc 3409 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
3410 proc->pid, thread->pid,
3411 node->debug_id);
673068ee 3412 binder_node_inner_unlock(node);
adc18842 3413 binder_put_node(node);
355b0502
GKH
3414 break;
3415 }
3416 node->pending_strong_ref = 0;
3417 } else {
3418 if (node->pending_weak_ref == 0) {
56b468fc 3419 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
3420 proc->pid, thread->pid,
3421 node->debug_id);
673068ee 3422 binder_node_inner_unlock(node);
adc18842 3423 binder_put_node(node);
355b0502
GKH
3424 break;
3425 }
3426 node->pending_weak_ref = 0;
3427 }
673068ee
TK
3428 free_node = binder_dec_node_nilocked(node,
3429 cmd == BC_ACQUIRE_DONE, 0);
3430 WARN_ON(free_node);
355b0502 3431 binder_debug(BINDER_DEBUG_USER_REFS,
adc18842 3432 "%d:%d %s node %d ls %d lw %d tr %d\n",
355b0502
GKH
3433 proc->pid, thread->pid,
3434 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
adc18842
TK
3435 node->debug_id, node->local_strong_refs,
3436 node->local_weak_refs, node->tmp_refs);
673068ee 3437 binder_node_inner_unlock(node);
adc18842 3438 binder_put_node(node);
355b0502
GKH
3439 break;
3440 }
3441 case BC_ATTEMPT_ACQUIRE:
56b468fc 3442 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
3443 return -EINVAL;
3444 case BC_ACQUIRE_RESULT:
56b468fc 3445 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
3446 return -EINVAL;
3447
3448 case BC_FREE_BUFFER: {
da49889d 3449 binder_uintptr_t data_ptr;
355b0502
GKH
3450 struct binder_buffer *buffer;
3451
da49889d 3452 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 3453 return -EFAULT;
da49889d 3454 ptr += sizeof(binder_uintptr_t);
355b0502 3455
53d311cf
TK
3456 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3457 data_ptr);
355b0502 3458 if (buffer == NULL) {
da49889d
AH
3459 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3460 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
3461 break;
3462 }
3463 if (!buffer->allow_user_free) {
da49889d
AH
3464 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3465 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
3466 break;
3467 }
3468 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
3469 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3470 proc->pid, thread->pid, (u64)data_ptr,
3471 buffer->debug_id,
355b0502
GKH
3472 buffer->transaction ? "active" : "finished");
3473
3474 if (buffer->transaction) {
3475 buffer->transaction->buffer = NULL;
3476 buffer->transaction = NULL;
3477 }
3478 if (buffer->async_transaction && buffer->target_node) {
72196393
TK
3479 struct binder_node *buf_node;
3480 struct binder_work *w;
3481
3482 buf_node = buffer->target_node;
673068ee 3483 binder_node_inner_lock(buf_node);
72196393
TK
3484 BUG_ON(!buf_node->has_async_transaction);
3485 BUG_ON(buf_node->proc != proc);
72196393
TK
3486 w = binder_dequeue_work_head_ilocked(
3487 &buf_node->async_todo);
3a6430ce 3488 if (!w) {
72196393 3489 buf_node->has_async_transaction = 0;
3a6430ce 3490 } else {
72196393 3491 binder_enqueue_work_ilocked(
3a6430ce
MC
3492 w, &proc->todo);
3493 binder_wakeup_proc_ilocked(proc);
3494 }
673068ee 3495 binder_node_inner_unlock(buf_node);
355b0502 3496 }
975a1ac9 3497 trace_binder_transaction_buffer_release(buffer);
355b0502 3498 binder_transaction_buffer_release(proc, buffer, NULL);
19c98724 3499 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
3500 break;
3501 }
3502
7980240b
MC
3503 case BC_TRANSACTION_SG:
3504 case BC_REPLY_SG: {
3505 struct binder_transaction_data_sg tr;
3506
3507 if (copy_from_user(&tr, ptr, sizeof(tr)))
3508 return -EFAULT;
3509 ptr += sizeof(tr);
3510 binder_transaction(proc, thread, &tr.transaction_data,
3511 cmd == BC_REPLY_SG, tr.buffers_size);
3512 break;
3513 }
355b0502
GKH
3514 case BC_TRANSACTION:
3515 case BC_REPLY: {
3516 struct binder_transaction_data tr;
3517
3518 if (copy_from_user(&tr, ptr, sizeof(tr)))
3519 return -EFAULT;
3520 ptr += sizeof(tr);
4bfac80a
MC
3521 binder_transaction(proc, thread, &tr,
3522 cmd == BC_REPLY, 0);
355b0502
GKH
3523 break;
3524 }
3525
3526 case BC_REGISTER_LOOPER:
3527 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3528 "%d:%d BC_REGISTER_LOOPER\n",
355b0502 3529 proc->pid, thread->pid);
b3e68612 3530 binder_inner_proc_lock(proc);
355b0502
GKH
3531 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3532 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3533 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
3534 proc->pid, thread->pid);
3535 } else if (proc->requested_threads == 0) {
3536 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3537 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
3538 proc->pid, thread->pid);
3539 } else {
3540 proc->requested_threads--;
3541 proc->requested_threads_started++;
3542 }
3543 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
b3e68612 3544 binder_inner_proc_unlock(proc);
355b0502
GKH
3545 break;
3546 case BC_ENTER_LOOPER:
3547 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3548 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
3549 proc->pid, thread->pid);
3550 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3551 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 3552 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
3553 proc->pid, thread->pid);
3554 }
3555 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3556 break;
3557 case BC_EXIT_LOOPER:
3558 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3559 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
3560 proc->pid, thread->pid);
3561 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3562 break;
3563
3564 case BC_REQUEST_DEATH_NOTIFICATION:
3565 case BC_CLEAR_DEATH_NOTIFICATION: {
3566 uint32_t target;
da49889d 3567 binder_uintptr_t cookie;
355b0502 3568 struct binder_ref *ref;
2c1838dc 3569 struct binder_ref_death *death = NULL;
355b0502
GKH
3570
3571 if (get_user(target, (uint32_t __user *)ptr))
3572 return -EFAULT;
3573 ptr += sizeof(uint32_t);
da49889d 3574 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 3575 return -EFAULT;
da49889d 3576 ptr += sizeof(binder_uintptr_t);
2c1838dc
TK
3577 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3578 /*
3579 * Allocate memory for death notification
3580 * before taking lock
3581 */
3582 death = kzalloc(sizeof(*death), GFP_KERNEL);
3583 if (death == NULL) {
3584 WARN_ON(thread->return_error.cmd !=
3585 BR_OK);
3586 thread->return_error.cmd = BR_ERROR;
148ade2c
MC
3587 binder_enqueue_thread_work(
3588 thread,
3589 &thread->return_error.work);
2c1838dc
TK
3590 binder_debug(
3591 BINDER_DEBUG_FAILED_TRANSACTION,
3592 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3593 proc->pid, thread->pid);
3594 break;
3595 }
3596 }
3597 binder_proc_lock(proc);
3598 ref = binder_get_ref_olocked(proc, target, false);
355b0502 3599 if (ref == NULL) {
56b468fc 3600 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
3601 proc->pid, thread->pid,
3602 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3603 "BC_REQUEST_DEATH_NOTIFICATION" :
3604 "BC_CLEAR_DEATH_NOTIFICATION",
3605 target);
2c1838dc
TK
3606 binder_proc_unlock(proc);
3607 kfree(death);
355b0502
GKH
3608 break;
3609 }
3610
3611 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 3612 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
3613 proc->pid, thread->pid,
3614 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3615 "BC_REQUEST_DEATH_NOTIFICATION" :
3616 "BC_CLEAR_DEATH_NOTIFICATION",
372e3147
TK
3617 (u64)cookie, ref->data.debug_id,
3618 ref->data.desc, ref->data.strong,
3619 ref->data.weak, ref->node->debug_id);
355b0502 3620
ab51ec6b 3621 binder_node_lock(ref->node);
355b0502
GKH
3622 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3623 if (ref->death) {
56b468fc 3624 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502 3625 proc->pid, thread->pid);
ab51ec6b 3626 binder_node_unlock(ref->node);
2c1838dc
TK
3627 binder_proc_unlock(proc);
3628 kfree(death);
355b0502
GKH
3629 break;
3630 }
3631 binder_stats_created(BINDER_STAT_DEATH);
3632 INIT_LIST_HEAD(&death->work.entry);
3633 death->cookie = cookie;
3634 ref->death = death;
3635 if (ref->node->proc == NULL) {
3636 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
bb74562a
MC
3637
3638 binder_inner_proc_lock(proc);
3639 binder_enqueue_work_ilocked(
3640 &ref->death->work, &proc->todo);
3641 binder_wakeup_proc_ilocked(proc);
3642 binder_inner_proc_unlock(proc);
355b0502
GKH
3643 }
3644 } else {
3645 if (ref->death == NULL) {
56b468fc 3646 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502 3647 proc->pid, thread->pid);
673068ee 3648 binder_node_unlock(ref->node);
2c1838dc 3649 binder_proc_unlock(proc);
355b0502
GKH
3650 break;
3651 }
3652 death = ref->death;
3653 if (death->cookie != cookie) {
da49889d 3654 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 3655 proc->pid, thread->pid,
da49889d
AH
3656 (u64)death->cookie,
3657 (u64)cookie);
673068ee 3658 binder_node_unlock(ref->node);
2c1838dc 3659 binder_proc_unlock(proc);
355b0502
GKH
3660 break;
3661 }
3662 ref->death = NULL;
72196393 3663 binder_inner_proc_lock(proc);
355b0502
GKH
3664 if (list_empty(&death->work.entry)) {
3665 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
72196393
TK
3666 if (thread->looper &
3667 (BINDER_LOOPER_STATE_REGISTERED |
3668 BINDER_LOOPER_STATE_ENTERED))
148ade2c
MC
3669 binder_enqueue_thread_work_ilocked(
3670 thread,
3671 &death->work);
72196393
TK
3672 else {
3673 binder_enqueue_work_ilocked(
3674 &death->work,
3675 &proc->todo);
1b77e9dc 3676 binder_wakeup_proc_ilocked(
408c68b1 3677 proc);
355b0502
GKH
3678 }
3679 } else {
3680 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3681 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3682 }
72196393 3683 binder_inner_proc_unlock(proc);
355b0502 3684 }
ab51ec6b 3685 binder_node_unlock(ref->node);
2c1838dc 3686 binder_proc_unlock(proc);
355b0502
GKH
3687 } break;
3688 case BC_DEAD_BINDER_DONE: {
3689 struct binder_work *w;
da49889d 3690 binder_uintptr_t cookie;
355b0502 3691 struct binder_ref_death *death = NULL;
10f62861 3692
da49889d 3693 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
3694 return -EFAULT;
3695
7a64cd88 3696 ptr += sizeof(cookie);
72196393
TK
3697 binder_inner_proc_lock(proc);
3698 list_for_each_entry(w, &proc->delivered_death,
3699 entry) {
3700 struct binder_ref_death *tmp_death =
3701 container_of(w,
3702 struct binder_ref_death,
3703 work);
10f62861 3704
355b0502
GKH
3705 if (tmp_death->cookie == cookie) {
3706 death = tmp_death;
3707 break;
3708 }
3709 }
3710 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
3711 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3712 proc->pid, thread->pid, (u64)cookie,
3713 death);
355b0502 3714 if (death == NULL) {
da49889d
AH
3715 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3716 proc->pid, thread->pid, (u64)cookie);
72196393 3717 binder_inner_proc_unlock(proc);
355b0502
GKH
3718 break;
3719 }
72196393 3720 binder_dequeue_work_ilocked(&death->work);
355b0502
GKH
3721 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3722 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
72196393
TK
3723 if (thread->looper &
3724 (BINDER_LOOPER_STATE_REGISTERED |
3725 BINDER_LOOPER_STATE_ENTERED))
148ade2c
MC
3726 binder_enqueue_thread_work_ilocked(
3727 thread, &death->work);
72196393
TK
3728 else {
3729 binder_enqueue_work_ilocked(
3730 &death->work,
3731 &proc->todo);
408c68b1 3732 binder_wakeup_proc_ilocked(proc);
355b0502
GKH
3733 }
3734 }
72196393 3735 binder_inner_proc_unlock(proc);
355b0502
GKH
3736 } break;
3737
3738 default:
56b468fc 3739 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
3740 proc->pid, thread->pid, cmd);
3741 return -EINVAL;
3742 }
3743 *consumed = ptr - buffer;
3744 }
3745 return 0;
3746}
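/*
 * The buffer parsed by binder_thread_write() is a packed stream of
 * u32 BC_* command codes, each immediately followed by its payload
 * (if any). A minimal userspace sketch of feeding this parser,
 * assuming "fd" is an already-open binder descriptor (the names here
 * are illustrative, not part of the driver):
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;		// no payload
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed reports how far the parser got
 */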
3747
fb07ebc3
BP
3748static void binder_stat_br(struct binder_proc *proc,
3749 struct binder_thread *thread, uint32_t cmd)
355b0502 3750{
975a1ac9 3751 trace_binder_return(cmd);
355b0502 3752 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
0953c797
BJS
3753 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3754 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3755 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
3756 }
3757}
3758
26b47d8a
TK
3759static int binder_put_node_cmd(struct binder_proc *proc,
3760 struct binder_thread *thread,
3761 void __user **ptrp,
3762 binder_uintptr_t node_ptr,
3763 binder_uintptr_t node_cookie,
3764 int node_debug_id,
3765 uint32_t cmd, const char *cmd_name)
3766{
3767 void __user *ptr = *ptrp;
3768
3769 if (put_user(cmd, (uint32_t __user *)ptr))
3770 return -EFAULT;
3771 ptr += sizeof(uint32_t);
3772
3773 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3774 return -EFAULT;
3775 ptr += sizeof(binder_uintptr_t);
3776
3777 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3778 return -EFAULT;
3779 ptr += sizeof(binder_uintptr_t);
3780
3781 binder_stat_br(proc, thread, cmd);
3782 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3783 proc->pid, thread->pid, cmd_name, node_debug_id,
3784 (u64)node_ptr, (u64)node_cookie);
3785
3786 *ptrp = ptr;
3787 return 0;
3788}
3789
1b77e9dc
MC
3790static int binder_wait_for_work(struct binder_thread *thread,
3791 bool do_proc_work)
3792{
3793 DEFINE_WAIT(wait);
3794 struct binder_proc *proc = thread->proc;
3795 int ret = 0;
3796
3797 freezer_do_not_count();
3798 binder_inner_proc_lock(proc);
3799 for (;;) {
3800 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3801 if (binder_has_work_ilocked(thread, do_proc_work))
3802 break;
3803 if (do_proc_work)
3804 list_add(&thread->waiting_thread_node,
3805 &proc->waiting_threads);
3806 binder_inner_proc_unlock(proc);
3807 schedule();
3808 binder_inner_proc_lock(proc);
3809 list_del_init(&thread->waiting_thread_node);
3810 if (signal_pending(current)) {
3811 ret = -ERESTARTSYS;
3812 break;
3813 }
3814 }
3815 finish_wait(&thread->wait, &wait);
3816 binder_inner_proc_unlock(proc);
3817 freezer_count();
3818
3819 return ret;
3820}
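/*
 * binder_wait_for_work() open-codes the prepare_to_wait() /
 * schedule() / finish_wait() idiom rather than using
 * wait_event_interruptible(), since both the condition check and the
 * proc->waiting_threads list manipulation must run under
 * proc->inner_lock, which has to be dropped before sleeping. The
 * generic shape of the idiom, for reference:
 *
 *	DEFINE_WAIT(wait);
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();		// runs with no locks held
 *	}
 *	finish_wait(&wq, &wait);
 */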
3821
355b0502
GKH
3822static int binder_thread_read(struct binder_proc *proc,
3823 struct binder_thread *thread,
da49889d
AH
3824 binder_uintptr_t binder_buffer, size_t size,
3825 binder_size_t *consumed, int non_block)
355b0502 3826{
da49889d 3827 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3828 void __user *ptr = buffer + *consumed;
3829 void __user *end = buffer + size;
3830
3831 int ret = 0;
3832 int wait_for_proc_work;
3833
3834 if (*consumed == 0) {
3835 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3836 return -EFAULT;
3837 ptr += sizeof(uint32_t);
3838 }
3839
3840retry:
0b89d69a 3841 binder_inner_proc_lock(proc);
1b77e9dc 3842 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
0b89d69a 3843 binder_inner_proc_unlock(proc);
355b0502 3844
355b0502 3845 thread->looper |= BINDER_LOOPER_STATE_WAITING;
975a1ac9 3846
975a1ac9
AH
3847 trace_binder_wait_for_work(wait_for_proc_work,
3848 !!thread->transaction_stack,
72196393 3849 !binder_worklist_empty(proc, &thread->todo));
355b0502
GKH
3850 if (wait_for_proc_work) {
3851 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3852 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 3853 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
3854 proc->pid, thread->pid, thread->looper);
3855 wait_event_interruptible(binder_user_error_wait,
3856 binder_stop_on_user_error < 2);
3857 }
3858 binder_set_nice(proc->default_priority);
1b77e9dc
MC
3859 }
3860
3861 if (non_block) {
3862 if (!binder_has_work(thread, wait_for_proc_work))
3863 ret = -EAGAIN;
355b0502 3864 } else {
1b77e9dc 3865 ret = binder_wait_for_work(thread, wait_for_proc_work);
355b0502 3866 }
975a1ac9 3867
355b0502
GKH
3868 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3869
3870 if (ret)
3871 return ret;
3872
3873 while (1) {
3874 uint32_t cmd;
3875 struct binder_transaction_data tr;
72196393
TK
3876 struct binder_work *w = NULL;
3877 struct list_head *list = NULL;
355b0502 3878 struct binder_transaction *t = NULL;
7a4408c6 3879 struct binder_thread *t_from;
355b0502 3880
ed29721e 3881 binder_inner_proc_lock(proc);
72196393
TK
3882 if (!binder_worklist_empty_ilocked(&thread->todo))
3883 list = &thread->todo;
3884 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3885 wait_for_proc_work)
3886 list = &proc->todo;
3887 else {
3888 binder_inner_proc_unlock(proc);
3889
395262a9 3890 /* no data added beyond the initial BR_NOOP (4 bytes) */
08dabcee 3891 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
3892 goto retry;
3893 break;
3894 }
3895
ed29721e
TK
3896 if (end - ptr < sizeof(tr) + 4) {
3897 binder_inner_proc_unlock(proc);
355b0502 3898 break;
ed29721e 3899 }
72196393 3900 w = binder_dequeue_work_head_ilocked(list);
148ade2c
MC
3901 if (binder_worklist_empty_ilocked(&thread->todo))
3902 thread->process_todo = false;
355b0502
GKH
3903
3904 switch (w->type) {
3905 case BINDER_WORK_TRANSACTION: {
ed29721e 3906 binder_inner_proc_unlock(proc);
355b0502
GKH
3907 t = container_of(w, struct binder_transaction, work);
3908 } break;
26549d17
TK
3909 case BINDER_WORK_RETURN_ERROR: {
3910 struct binder_error *e = container_of(
3911 w, struct binder_error, work);
3912
3913 WARN_ON(e->cmd == BR_OK);
ed29721e 3914 binder_inner_proc_unlock(proc);
26549d17
TK
3915 if (put_user(e->cmd, (uint32_t __user *)ptr))
3916 return -EFAULT;
3917 e->cmd = BR_OK;
3918 ptr += sizeof(uint32_t);
3919
4f9adc8f 3920 binder_stat_br(proc, thread, e->cmd);
26549d17 3921 } break;
355b0502 3922 case BINDER_WORK_TRANSACTION_COMPLETE: {
ed29721e 3923 binder_inner_proc_unlock(proc);
355b0502
GKH
3924 cmd = BR_TRANSACTION_COMPLETE;
3925 if (put_user(cmd, (uint32_t __user *)ptr))
3926 return -EFAULT;
3927 ptr += sizeof(uint32_t);
3928
3929 binder_stat_br(proc, thread, cmd);
3930 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 3931 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502 3932 proc->pid, thread->pid);
355b0502
GKH
3933 kfree(w);
3934 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3935 } break;
3936 case BINDER_WORK_NODE: {
3937 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
3938 int strong, weak;
3939 binder_uintptr_t node_ptr = node->ptr;
3940 binder_uintptr_t node_cookie = node->cookie;
3941 int node_debug_id = node->debug_id;
3942 int has_weak_ref;
3943 int has_strong_ref;
3944 void __user *orig_ptr = ptr;
3945
3946 BUG_ON(proc != node->proc);
3947 strong = node->internal_strong_refs ||
3948 node->local_strong_refs;
3949 weak = !hlist_empty(&node->refs) ||
adc18842
TK
3950 node->local_weak_refs ||
3951 node->tmp_refs || strong;
26b47d8a
TK
3952 has_strong_ref = node->has_strong_ref;
3953 has_weak_ref = node->has_weak_ref;
3954
3955 if (weak && !has_weak_ref) {
355b0502
GKH
3956 node->has_weak_ref = 1;
3957 node->pending_weak_ref = 1;
3958 node->local_weak_refs++;
26b47d8a
TK
3959 }
3960 if (strong && !has_strong_ref) {
355b0502
GKH
3961 node->has_strong_ref = 1;
3962 node->pending_strong_ref = 1;
3963 node->local_strong_refs++;
26b47d8a
TK
3964 }
3965 if (!strong && has_strong_ref)
355b0502 3966 node->has_strong_ref = 0;
26b47d8a 3967 if (!weak && has_weak_ref)
355b0502 3968 node->has_weak_ref = 0;
26b47d8a
TK
3969 if (!weak && !strong) {
3970 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3971 "%d:%d node %d u%016llx c%016llx deleted\n",
3972 proc->pid, thread->pid,
3973 node_debug_id,
3974 (u64)node_ptr,
3975 (u64)node_cookie);
3976 rb_erase(&node->rb_node, &proc->nodes);
ed29721e 3977 binder_inner_proc_unlock(proc);
673068ee
TK
3978 binder_node_lock(node);
3979 /*
3980 * Acquire the node lock before freeing the
3981 * node to serialize with other threads that
3982 * may have been holding the node lock while
3983 * decrementing this node (avoids race where
3984 * this thread frees while the other thread
3985 * is unlocking the node after the final
3986 * decrement)
3987 */
3988 binder_node_unlock(node);
ed29721e
TK
3989 binder_free_node(node);
3990 } else
3991 binder_inner_proc_unlock(proc);
3992
26b47d8a
TK
3993 if (weak && !has_weak_ref)
3994 ret = binder_put_node_cmd(
3995 proc, thread, &ptr, node_ptr,
3996 node_cookie, node_debug_id,
3997 BR_INCREFS, "BR_INCREFS");
3998 if (!ret && strong && !has_strong_ref)
3999 ret = binder_put_node_cmd(
4000 proc, thread, &ptr, node_ptr,
4001 node_cookie, node_debug_id,
4002 BR_ACQUIRE, "BR_ACQUIRE");
4003 if (!ret && !strong && has_strong_ref)
4004 ret = binder_put_node_cmd(
4005 proc, thread, &ptr, node_ptr,
4006 node_cookie, node_debug_id,
4007 BR_RELEASE, "BR_RELEASE");
4008 if (!ret && !weak && has_weak_ref)
4009 ret = binder_put_node_cmd(
4010 proc, thread, &ptr, node_ptr,
4011 node_cookie, node_debug_id,
4012 BR_DECREFS, "BR_DECREFS");
4013 if (orig_ptr == ptr)
4014 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4015 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4016 proc->pid, thread->pid,
4017 node_debug_id,
4018 (u64)node_ptr,
4019 (u64)node_cookie);
4020 if (ret)
4021 return ret;
355b0502
GKH
4022 } break;
4023 case BINDER_WORK_DEAD_BINDER:
4024 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4025 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4026 struct binder_ref_death *death;
4027 uint32_t cmd;
ab51ec6b 4028 binder_uintptr_t cookie;
355b0502
GKH
4029
4030 death = container_of(w, struct binder_ref_death, work);
4031 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4032 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4033 else
4034 cmd = BR_DEAD_BINDER;
ab51ec6b
MC
4035 cookie = death->cookie;
4036
355b0502 4037 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 4038 "%d:%d %s %016llx\n",
355b0502
GKH
4039 proc->pid, thread->pid,
4040 cmd == BR_DEAD_BINDER ?
4041 "BR_DEAD_BINDER" :
4042 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
ab51ec6b 4043 (u64)cookie);
355b0502 4044 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
ab51ec6b 4045 binder_inner_proc_unlock(proc);
355b0502
GKH
4046 kfree(death);
4047 binder_stats_deleted(BINDER_STAT_DEATH);
ed29721e 4048 } else {
72196393
TK
4049 binder_enqueue_work_ilocked(
4050 w, &proc->delivered_death);
ed29721e
TK
4051 binder_inner_proc_unlock(proc);
4052 }
ab51ec6b
MC
4053 if (put_user(cmd, (uint32_t __user *)ptr))
4054 return -EFAULT;
4055 ptr += sizeof(uint32_t);
4056 if (put_user(cookie,
4057 (binder_uintptr_t __user *)ptr))
4058 return -EFAULT;
4059 ptr += sizeof(binder_uintptr_t);
4060 binder_stat_br(proc, thread, cmd);
355b0502
GKH
4061 if (cmd == BR_DEAD_BINDER)
4062 goto done; /* DEAD_BINDER notifications can cause transactions */
4063 } break;
4064 }
4065
4066 if (!t)
4067 continue;
4068
4069 BUG_ON(t->buffer == NULL);
4070 if (t->buffer->target_node) {
4071 struct binder_node *target_node = t->buffer->target_node;
10f62861 4072
355b0502
GKH
4073 tr.target.ptr = target_node->ptr;
4074 tr.cookie = target_node->cookie;
4075 t->saved_priority = task_nice(current);
4076 if (t->priority < target_node->min_priority &&
4077 !(t->flags & TF_ONE_WAY))
4078 binder_set_nice(t->priority);
4079 else if (!(t->flags & TF_ONE_WAY) ||
4080 t->saved_priority > target_node->min_priority)
4081 binder_set_nice(target_node->min_priority);
4082 cmd = BR_TRANSACTION;
4083 } else {
da49889d
AH
4084 tr.target.ptr = 0;
4085 tr.cookie = 0;
355b0502
GKH
4086 cmd = BR_REPLY;
4087 }
4088 tr.code = t->code;
4089 tr.flags = t->flags;
4a2ebb93 4090 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 4091
7a4408c6
TK
4092 t_from = binder_get_txn_from(t);
4093 if (t_from) {
4094 struct task_struct *sender = t_from->proc->tsk;
10f62861 4095
355b0502 4096 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 4097 task_active_pid_ns(current));
355b0502
GKH
4098 } else {
4099 tr.sender_pid = 0;
4100 }
4101
4102 tr.data_size = t->buffer->data_size;
4103 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
4104 tr.data.ptr.buffer = (binder_uintptr_t)
4105 ((uintptr_t)t->buffer->data +
4106 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
4107 tr.data.ptr.offsets = tr.data.ptr.buffer +
4108 ALIGN(t->buffer->data_size,
4109 sizeof(void *));
4110
7a4408c6
TK
4111 if (put_user(cmd, (uint32_t __user *)ptr)) {
4112 if (t_from)
4113 binder_thread_dec_tmpref(t_from);
fb2c4452
MC
4114
4115 binder_cleanup_transaction(t, "put_user failed",
4116 BR_FAILED_REPLY);
4117
355b0502 4118 return -EFAULT;
7a4408c6 4119 }
355b0502 4120 ptr += sizeof(uint32_t);
7a4408c6
TK
4121 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4122 if (t_from)
4123 binder_thread_dec_tmpref(t_from);
fb2c4452
MC
4124
4125 binder_cleanup_transaction(t, "copy_to_user failed",
4126 BR_FAILED_REPLY);
4127
355b0502 4128 return -EFAULT;
7a4408c6 4129 }
355b0502
GKH
4130 ptr += sizeof(tr);
4131
975a1ac9 4132 trace_binder_transaction_received(t);
355b0502
GKH
4133 binder_stat_br(proc, thread, cmd);
4134 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 4135 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
4136 proc->pid, thread->pid,
4137 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4138 "BR_REPLY",
7a4408c6
TK
4139 t->debug_id, t_from ? t_from->proc->pid : 0,
4140 t_from ? t_from->pid : 0, cmd,
355b0502 4141 t->buffer->data_size, t->buffer->offsets_size,
da49889d 4142 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 4143
7a4408c6
TK
4144 if (t_from)
4145 binder_thread_dec_tmpref(t_from);
355b0502
GKH
4146 t->buffer->allow_user_free = 1;
4147 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
0b89d69a 4148 binder_inner_proc_lock(thread->proc);
355b0502
GKH
4149 t->to_parent = thread->transaction_stack;
4150 t->to_thread = thread;
4151 thread->transaction_stack = t;
0b89d69a 4152 binder_inner_proc_unlock(thread->proc);
355b0502 4153 } else {
b6d282ce 4154 binder_free_transaction(t);
355b0502
GKH
4155 }
4156 break;
4157 }
4158
4159done:
4160
4161 *consumed = ptr - buffer;
b3e68612 4162 binder_inner_proc_lock(proc);
1b77e9dc
MC
4163 if (proc->requested_threads == 0 &&
4164 list_empty(&thread->proc->waiting_threads) &&
355b0502
GKH
4165 proc->requested_threads_started < proc->max_threads &&
4166 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4167 BINDER_LOOPER_STATE_ENTERED))
4168 /* the user-space code fails to spawn a new thread if we leave this out */) {
4169 proc->requested_threads++;
b3e68612 4170 binder_inner_proc_unlock(proc);
355b0502 4171 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 4172 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
4173 proc->pid, thread->pid);
4174 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4175 return -EFAULT;
89334ab4 4176 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
b3e68612
TK
4177 } else
4178 binder_inner_proc_unlock(proc);
355b0502
GKH
4179 return 0;
4180}
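/*
 * The read side mirrors the write side: binder_thread_read() fills
 * the buffer with a packed stream of u32 BR_* return codes, each
 * followed by its payload (BR_NOOP leads whenever *consumed starts
 * at 0). Since every BR_* code is defined via _IO()/_IOR(), the
 * payload size can be recovered with _IOC_SIZE(). A minimal
 * userspace consumer sketch, assuming "rbuf" and "bwr" were just
 * filled by a BINDER_WRITE_READ ioctl:
 *
 *	uint8_t *p = rbuf, *end = rbuf + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
 *			struct binder_transaction_data *tr = (void *)p;
 *			// payload points into the mmap'ed region;
 *			// release it later with BC_FREE_BUFFER
 *		}
 *		p += _IOC_SIZE(cmd);	// skip this command's payload
 *	}
 */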
4181
72196393
TK
4182static void binder_release_work(struct binder_proc *proc,
4183 struct list_head *list)
355b0502
GKH
4184{
4185 struct binder_work *w;
10f62861 4186
72196393
TK
4187 while (1) {
4188 w = binder_dequeue_work_head(proc, list);
4189 if (!w)
4190 return;
4191
355b0502
GKH
4192 switch (w->type) {
4193 case BINDER_WORK_TRANSACTION: {
4194 struct binder_transaction *t;
4195
4196 t = container_of(w, struct binder_transaction, work);
fb2c4452
MC
4197
4198 binder_cleanup_transaction(t, "process died.",
4199 BR_DEAD_REPLY);
355b0502 4200 } break;
26549d17
TK
4201 case BINDER_WORK_RETURN_ERROR: {
4202 struct binder_error *e = container_of(
4203 w, struct binder_error, work);
4204
4205 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4206 "undelivered TRANSACTION_ERROR: %u\n",
4207 e->cmd);
4208 } break;
355b0502 4209 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 4210 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 4211 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
4212 kfree(w);
4213 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4214 } break;
675d66b0
AH
4215 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4216 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4217 struct binder_ref_death *death;
4218
4219 death = container_of(w, struct binder_ref_death, work);
4220 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
4221 "undelivered death notification, %016llx\n",
4222 (u64)death->cookie);
675d66b0
AH
4223 kfree(death);
4224 binder_stats_deleted(BINDER_STAT_DEATH);
4225 } break;
355b0502 4226 default:
56b468fc 4227 pr_err("unexpected work type, %d, not freed\n",
675d66b0 4228 w->type);
355b0502
GKH
4229 break;
4230 }
4231 }
4232
4233}
4234
7bd7b0e6
TK
4235static struct binder_thread *binder_get_thread_ilocked(
4236 struct binder_proc *proc, struct binder_thread *new_thread)
355b0502
GKH
4237{
4238 struct binder_thread *thread = NULL;
4239 struct rb_node *parent = NULL;
4240 struct rb_node **p = &proc->threads.rb_node;
4241
4242 while (*p) {
4243 parent = *p;
4244 thread = rb_entry(parent, struct binder_thread, rb_node);
4245
4246 if (current->pid < thread->pid)
4247 p = &(*p)->rb_left;
4248 else if (current->pid > thread->pid)
4249 p = &(*p)->rb_right;
4250 else
7bd7b0e6 4251 return thread;
355b0502 4252 }
7bd7b0e6
TK
4253 if (!new_thread)
4254 return NULL;
4255 thread = new_thread;
4256 binder_stats_created(BINDER_STAT_THREAD);
4257 thread->proc = proc;
4258 thread->pid = current->pid;
4259 atomic_set(&thread->tmp_ref, 0);
4260 init_waitqueue_head(&thread->wait);
4261 INIT_LIST_HEAD(&thread->todo);
4262 rb_link_node(&thread->rb_node, parent, p);
4263 rb_insert_color(&thread->rb_node, &proc->threads);
4264 thread->looper_need_return = true;
4265 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4266 thread->return_error.cmd = BR_OK;
4267 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4268 thread->reply_error.cmd = BR_OK;
1b77e9dc 4269 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
7bd7b0e6
TK
4270 return thread;
4271}
4272
4273static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4274{
4275 struct binder_thread *thread;
4276 struct binder_thread *new_thread;
4277
4278 binder_inner_proc_lock(proc);
4279 thread = binder_get_thread_ilocked(proc, NULL);
4280 binder_inner_proc_unlock(proc);
4281 if (!thread) {
4282 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4283 if (new_thread == NULL)
355b0502 4284 return NULL;
7bd7b0e6
TK
4285 binder_inner_proc_lock(proc);
4286 thread = binder_get_thread_ilocked(proc, new_thread);
4287 binder_inner_proc_unlock(proc);
4288 if (thread != new_thread)
4289 kfree(new_thread);
355b0502
GKH
4290 }
4291 return thread;
4292}
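/*
 * Note the allocate-then-retry shape above: proc->inner_lock is a
 * spinlock, so the GFP_KERNEL allocation (which may sleep) happens
 * outside it, the lookup is repeated under the lock, and the new
 * allocation is simply freed if another thread inserted the entry in
 * the window between the two lookups.
 */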
4293
7a4408c6
TK
4294static void binder_free_proc(struct binder_proc *proc)
4295{
4296 BUG_ON(!list_empty(&proc->todo));
4297 BUG_ON(!list_empty(&proc->delivered_death));
4298 binder_alloc_deferred_release(&proc->alloc);
4299 put_task_struct(proc->tsk);
4300 binder_stats_deleted(BINDER_STAT_PROC);
4301 kfree(proc);
4302}
4303
4304static void binder_free_thread(struct binder_thread *thread)
4305{
4306 BUG_ON(!list_empty(&thread->todo));
4307 binder_stats_deleted(BINDER_STAT_THREAD);
4308 binder_proc_dec_tmpref(thread->proc);
4309 kfree(thread);
4310}
4311
4312static int binder_thread_release(struct binder_proc *proc,
4313 struct binder_thread *thread)
355b0502
GKH
4314{
4315 struct binder_transaction *t;
4316 struct binder_transaction *send_reply = NULL;
4317 int active_transactions = 0;
7a4408c6 4318 struct binder_transaction *last_t = NULL;
355b0502 4319
7bd7b0e6 4320 binder_inner_proc_lock(thread->proc);
7a4408c6
TK
4321 /*
4322 * take a ref on the proc so it survives
4323 * after we remove this thread from proc->threads.
4324 * The corresponding dec is when we actually
4325 * free the thread in binder_free_thread()
4326 */
4327 proc->tmp_ref++;
4328 /*
4329 * take a ref on this thread to ensure it
4330 * survives while we are releasing it
4331 */
4332 atomic_inc(&thread->tmp_ref);
355b0502
GKH
4333 rb_erase(&thread->rb_node, &proc->threads);
4334 t = thread->transaction_stack;
7a4408c6
TK
4335 if (t) {
4336 spin_lock(&t->lock);
4337 if (t->to_thread == thread)
4338 send_reply = t;
4339 }
4340 thread->is_dead = true;
4341
355b0502 4342 while (t) {
7a4408c6 4343 last_t = t;
355b0502
GKH
4344 active_transactions++;
4345 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
4346 "release %d:%d transaction %d %s, still active\n",
4347 proc->pid, thread->pid,
355b0502
GKH
4348 t->debug_id,
4349 (t->to_thread == thread) ? "in" : "out");
4350
4351 if (t->to_thread == thread) {
4352 t->to_proc = NULL;
4353 t->to_thread = NULL;
4354 if (t->buffer) {
4355 t->buffer->transaction = NULL;
4356 t->buffer = NULL;
4357 }
4358 t = t->to_parent;
4359 } else if (t->from == thread) {
4360 t->from = NULL;
4361 t = t->from_parent;
4362 } else
4363 BUG();
7a4408c6
TK
4364 spin_unlock(&last_t->lock);
4365 if (t)
4366 spin_lock(&t->lock);
355b0502 4367 }
7bd7b0e6 4368 binder_inner_proc_unlock(thread->proc);
7a4408c6 4369
355b0502
GKH
4370 if (send_reply)
4371 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
72196393 4372 binder_release_work(proc, &thread->todo);
7a4408c6 4373 binder_thread_dec_tmpref(thread);
355b0502
GKH
4374 return active_transactions;
4375}
4376
4377static unsigned int binder_poll(struct file *filp,
4378 struct poll_table_struct *wait)
4379{
4380 struct binder_proc *proc = filp->private_data;
4381 struct binder_thread *thread = NULL;
1b77e9dc 4382 bool wait_for_proc_work;
355b0502 4383
355b0502
GKH
4384 thread = binder_get_thread(proc);
4385
0b89d69a 4386 binder_inner_proc_lock(thread->proc);
1b77e9dc
MC
4387 thread->looper |= BINDER_LOOPER_STATE_POLL;
4388 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4389
0b89d69a 4390 binder_inner_proc_unlock(thread->proc);
975a1ac9 4391
1b77e9dc
MC
4392 poll_wait(filp, &thread->wait, wait);
4393
66b83a4c 4394 if (binder_has_work(thread, wait_for_proc_work))
1b77e9dc
MC
4395 return POLLIN;
4396
355b0502
GKH
4397 return 0;
4398}
4399
78260ac6
TR
4400static int binder_ioctl_write_read(struct file *filp,
4401 unsigned int cmd, unsigned long arg,
4402 struct binder_thread *thread)
4403{
4404 int ret = 0;
4405 struct binder_proc *proc = filp->private_data;
4406 unsigned int size = _IOC_SIZE(cmd);
4407 void __user *ubuf = (void __user *)arg;
4408 struct binder_write_read bwr;
4409
4410 if (size != sizeof(struct binder_write_read)) {
4411 ret = -EINVAL;
4412 goto out;
4413 }
4414 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4415 ret = -EFAULT;
4416 goto out;
4417 }
4418 binder_debug(BINDER_DEBUG_READ_WRITE,
4419 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4420 proc->pid, thread->pid,
4421 (u64)bwr.write_size, (u64)bwr.write_buffer,
4422 (u64)bwr.read_size, (u64)bwr.read_buffer);
4423
4424 if (bwr.write_size > 0) {
4425 ret = binder_thread_write(proc, thread,
4426 bwr.write_buffer,
4427 bwr.write_size,
4428 &bwr.write_consumed);
4429 trace_binder_write_done(ret);
4430 if (ret < 0) {
4431 bwr.read_consumed = 0;
4432 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4433 ret = -EFAULT;
4434 goto out;
4435 }
4436 }
4437 if (bwr.read_size > 0) {
4438 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4439 bwr.read_size,
4440 &bwr.read_consumed,
4441 filp->f_flags & O_NONBLOCK);
4442 trace_binder_read_done(ret);
1b77e9dc
MC
4443 binder_inner_proc_lock(proc);
4444 if (!binder_worklist_empty_ilocked(&proc->todo))
408c68b1 4445 binder_wakeup_proc_ilocked(proc);
1b77e9dc 4446 binder_inner_proc_unlock(proc);
78260ac6
TR
4447 if (ret < 0) {
4448 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4449 ret = -EFAULT;
4450 goto out;
4451 }
4452 }
4453 binder_debug(BINDER_DEBUG_READ_WRITE,
4454 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4455 proc->pid, thread->pid,
4456 (u64)bwr.write_consumed, (u64)bwr.write_size,
4457 (u64)bwr.read_consumed, (u64)bwr.read_size);
4458 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4459 ret = -EFAULT;
4460 goto out;
4461 }
4462out:
4463 return ret;
4464}
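/*
 * Both halves can be combined in one call: the write buffer is
 * consumed first, and the read then blocks for work unless the
 * descriptor is O_NONBLOCK. A minimal sketch, with "fd", "wbuf" and
 * "wlen" assumed to be caller-provided:
 *
 *	uint8_t rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *		.write_size   = wlen,
 *		.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf,
 *		.read_size    = sizeof(rbuf),
 *	};
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *	// rbuf[0 .. bwr.read_consumed) now holds BR_* commands
 */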
4465
4466static int binder_ioctl_set_ctx_mgr(struct file *filp)
4467{
4468 int ret = 0;
4469 struct binder_proc *proc = filp->private_data;
342e5c90 4470 struct binder_context *context = proc->context;
c44b1231 4471 struct binder_node *new_node;
78260ac6
TR
4472 kuid_t curr_euid = current_euid();
4473
c44b1231 4474 mutex_lock(&context->context_mgr_node_lock);
342e5c90 4475 if (context->binder_context_mgr_node) {
78260ac6
TR
4476 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4477 ret = -EBUSY;
4478 goto out;
4479 }
79af7307
SS
4480 ret = security_binder_set_context_mgr(proc->tsk);
4481 if (ret < 0)
4482 goto out;
342e5c90
MC
4483 if (uid_valid(context->binder_context_mgr_uid)) {
4484 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
4485 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4486 from_kuid(&init_user_ns, curr_euid),
4487 from_kuid(&init_user_ns,
342e5c90 4488 context->binder_context_mgr_uid));
78260ac6
TR
4489 ret = -EPERM;
4490 goto out;
4491 }
4492 } else {
342e5c90 4493 context->binder_context_mgr_uid = curr_euid;
78260ac6 4494 }
673068ee 4495 new_node = binder_new_node(proc, NULL);
c44b1231 4496 if (!new_node) {
78260ac6
TR
4497 ret = -ENOMEM;
4498 goto out;
4499 }
673068ee 4500 binder_node_lock(new_node);
c44b1231
TK
4501 new_node->local_weak_refs++;
4502 new_node->local_strong_refs++;
4503 new_node->has_strong_ref = 1;
4504 new_node->has_weak_ref = 1;
4505 context->binder_context_mgr_node = new_node;
673068ee 4506 binder_node_unlock(new_node);
adc18842 4507 binder_put_node(new_node);
78260ac6 4508out:
c44b1231 4509 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
4510 return ret;
4511}
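/*
 * Only one process per context may claim handle 0. A minimal sketch
 * of what a service manager does at startup (error handling here is
 * illustrative):
 *
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		return -1;	// -EBUSY: a manager already exists;
 *				// -EPERM: LSM hook or uid mismatch
 */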
4512
abcc6153
CC
4513static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4514 struct binder_node_debug_info *info)
4515{
4516 struct rb_node *n;
4517 binder_uintptr_t ptr = info->ptr;
4518
4519 memset(info, 0, sizeof(*info));
4520
4521 binder_inner_proc_lock(proc);
4522 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4523 struct binder_node *node = rb_entry(n, struct binder_node,
4524 rb_node);
4525 if (node->ptr > ptr) {
4526 info->ptr = node->ptr;
4527 info->cookie = node->cookie;
4528 info->has_strong_ref = node->has_strong_ref;
4529 info->has_weak_ref = node->has_weak_ref;
4530 break;
4531 }
4532 }
4533 binder_inner_proc_unlock(proc);
4534
4535 return 0;
4536}
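/*
 * Userspace walks a process's nodes by feeding each returned ptr
 * back in; the loop terminates because the handler above zeroes the
 * struct when no node with a larger ptr exists. A minimal sketch:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		// info.has_strong_ref / info.has_weak_ref describe
 *		// the node at info.ptr (all-zero once exhausted)
 *	} while (info.ptr);
 */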
4537
355b0502
GKH
4538static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4539{
4540 int ret;
4541 struct binder_proc *proc = filp->private_data;
4542 struct binder_thread *thread;
4543 unsigned int size = _IOC_SIZE(cmd);
4544 void __user *ubuf = (void __user *)arg;
4545
78260ac6
TR
4546 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4547 proc->pid, current->pid, cmd, arg);*/
355b0502 4548
4175e2b4
SY
4549 binder_selftest_alloc(&proc->alloc);
4550
975a1ac9
AH
4551 trace_binder_ioctl(cmd, arg);
4552
355b0502
GKH
4553 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4554 if (ret)
975a1ac9 4555 goto err_unlocked;
355b0502 4556
355b0502
GKH
4557 thread = binder_get_thread(proc);
4558 if (thread == NULL) {
4559 ret = -ENOMEM;
4560 goto err;
4561 }
4562
4563 switch (cmd) {
78260ac6
TR
4564 case BINDER_WRITE_READ:
4565 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4566 if (ret)
355b0502 4567 goto err;
355b0502 4568 break;
b3e68612
TK
4569 case BINDER_SET_MAX_THREADS: {
4570 int max_threads;
4571
4572 if (copy_from_user(&max_threads, ubuf,
4573 sizeof(max_threads))) {
355b0502
GKH
4574 ret = -EINVAL;
4575 goto err;
4576 }
b3e68612
TK
4577 binder_inner_proc_lock(proc);
4578 proc->max_threads = max_threads;
4579 binder_inner_proc_unlock(proc);
355b0502 4580 break;
b3e68612 4581 }
355b0502 4582 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
4583 ret = binder_ioctl_set_ctx_mgr(filp);
4584 if (ret)
355b0502 4585 goto err;
355b0502
GKH
4586 break;
4587 case BINDER_THREAD_EXIT:
56b468fc 4588 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 4589 proc->pid, thread->pid);
7a4408c6 4590 binder_thread_release(proc, thread);
355b0502
GKH
4591 thread = NULL;
4592 break;
36c89c0a
MM
4593 case BINDER_VERSION: {
4594 struct binder_version __user *ver = ubuf;
4595
355b0502
GKH
4596 if (size != sizeof(struct binder_version)) {
4597 ret = -EINVAL;
4598 goto err;
4599 }
36c89c0a
MM
4600 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4601 &ver->protocol_version)) {
355b0502
GKH
4602 ret = -EINVAL;
4603 goto err;
4604 }
4605 break;
36c89c0a 4606 }
abcc6153
CC
4607 case BINDER_GET_NODE_DEBUG_INFO: {
4608 struct binder_node_debug_info info;
4609
4610 if (copy_from_user(&info, ubuf, sizeof(info))) {
4611 ret = -EFAULT;
4612 goto err;
4613 }
4614
4615 ret = binder_ioctl_get_node_debug_info(proc, &info);
4616 if (ret < 0)
4617 goto err;
4618
4619 if (copy_to_user(ubuf, &info, sizeof(info))) {
4620 ret = -EFAULT;
4621 goto err;
4622 }
4623 break;
4624 }
355b0502
GKH
4625 default:
4626 ret = -EINVAL;
4627 goto err;
4628 }
4629 ret = 0;
4630err:
4631 if (thread)
08dabcee 4632 thread->looper_need_return = false;
355b0502
GKH
4633 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4634 if (ret && ret != -ERESTARTSYS)
56b468fc 4635 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
4636err_unlocked:
4637 trace_binder_ioctl_done(ret);
355b0502
GKH
4638 return ret;
4639}
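/*
 * Sketches of the simpler ioctls dispatched above, assuming "fd" is
 * an open binder descriptor:
 *
 *	uint32_t max = 15;
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max);
 *
 *	struct binder_version vers;
 *	if (ioctl(fd, BINDER_VERSION, &vers) == 0 &&
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// userspace headers and driver disagree
 */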
4640
4641static void binder_vma_open(struct vm_area_struct *vma)
4642{
4643 struct binder_proc *proc = vma->vm_private_data;
10f62861 4644
355b0502 4645 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4646 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4647 proc->pid, vma->vm_start, vma->vm_end,
4648 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4649 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
4650}
4651
4652static void binder_vma_close(struct vm_area_struct *vma)
4653{
4654 struct binder_proc *proc = vma->vm_private_data;
10f62861 4655
355b0502 4656 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4657 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4658 proc->pid, vma->vm_start, vma->vm_end,
4659 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4660 (unsigned long)pgprot_val(vma->vm_page_prot));
19c98724 4661 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
4662 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4663}
4664
11bac800 4665static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
4666{
4667 return VM_FAULT_SIGBUS;
4668}
4669
7cbea8dc 4670static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
4671 .open = binder_vma_open,
4672 .close = binder_vma_close,
ddac7d5f 4673 .fault = binder_vm_fault,
355b0502
GKH
4674};
4675
19c98724
TK
4676static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4677{
4678 int ret;
4679 struct binder_proc *proc = filp->private_data;
4680 const char *failure_string;
4681
4682 if (proc->tsk != current->group_leader)
4683 return -EINVAL;
4684
4685 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4686 vma->vm_end = vma->vm_start + SZ_4M;
4687
4688 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4689 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4690 __func__, proc->pid, vma->vm_start, vma->vm_end,
4691 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4692 (unsigned long)pgprot_val(vma->vm_page_prot));
4693
4694 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4695 ret = -EPERM;
4696 failure_string = "bad vm_flags";
4697 goto err_bad_arg;
4698 }
4699 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4700 vma->vm_ops = &binder_vm_ops;
4701 vma->vm_private_data = proc;
4702
4703 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4704 if (ret)
4705 return ret;
7f3dc008 4706 mutex_lock(&proc->files_lock);
19c98724 4707 proc->files = get_files_struct(current);
7f3dc008 4708 mutex_unlock(&proc->files_lock);
19c98724
TK
4709 return 0;
4710
355b0502 4711err_bad_arg:
00c41cdd 4712 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
355b0502
GKH
4713 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4714 return ret;
4715}
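/*
 * The mapping must be read-only from userspace (FORBIDDEN_MMAP_FLAGS
 * rejects writable VMAs) and is capped at 4MB above. A minimal
 * sketch of how a client maps the receive buffer, assuming "fd" is
 * an open binder descriptor:
 *
 *	#include <sys/mman.h>
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 *	// incoming payloads (tr.data.ptr.buffer) point into this
 *	// region; each buffer is returned with BC_FREE_BUFFER
 */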
4716
4717static int binder_open(struct inode *nodp, struct file *filp)
4718{
4719 struct binder_proc *proc;
ac4812c5 4720 struct binder_device *binder_dev;
355b0502 4721
00c41cdd 4722 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
355b0502
GKH
4723 current->group_leader->pid, current->pid);
4724
4725 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4726 if (proc == NULL)
4727 return -ENOMEM;
9630fe88
TK
4728 spin_lock_init(&proc->inner_lock);
4729 spin_lock_init(&proc->outer_lock);
c4ea41ba
TK
4730 get_task_struct(current->group_leader);
4731 proc->tsk = current->group_leader;
7f3dc008 4732 mutex_init(&proc->files_lock);
355b0502 4733 INIT_LIST_HEAD(&proc->todo);
355b0502 4734 proc->default_priority = task_nice(current);
ac4812c5
MC
4735 binder_dev = container_of(filp->private_data, struct binder_device,
4736 miscdev);
4737 proc->context = &binder_dev->context;
19c98724 4738 binder_alloc_init(&proc->alloc);
975a1ac9 4739
355b0502 4740 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
4741 proc->pid = current->group_leader->pid;
4742 INIT_LIST_HEAD(&proc->delivered_death);
1b77e9dc 4743 INIT_LIST_HEAD(&proc->waiting_threads);
355b0502 4744 filp->private_data = proc;
975a1ac9 4745
c44b1231
TK
4746 mutex_lock(&binder_procs_lock);
4747 hlist_add_head(&proc->proc_node, &binder_procs);
4748 mutex_unlock(&binder_procs_lock);
4749
16b66554 4750 if (binder_debugfs_dir_entry_proc) {
355b0502 4751 char strbuf[11];
10f62861 4752
355b0502 4753 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
14db3181
MC
4754 /*
4755 * proc debug entries are shared between contexts, so
4756 * this will fail if the process tries to open the driver
4757 * again with a different context. The printing code will
4758 * anyway print all contexts that a given PID has, so this
4759 * is not a problem.
4760 */
16b66554 4761 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
14db3181
MC
4762 binder_debugfs_dir_entry_proc,
4763 (void *)(unsigned long)proc->pid,
4764 &binder_proc_fops);
355b0502
GKH
4765 }
4766
4767 return 0;
4768}
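/*
 * A session starts here: one binder_proc per process per device
 * node. Minimal sketch (/dev/binder is the usual node; each
 * binder_device registers its own):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 */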
4769
4770static int binder_flush(struct file *filp, fl_owner_t id)
4771{
4772 struct binder_proc *proc = filp->private_data;
4773
4774 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4775
4776 return 0;
4777}
4778
4779static void binder_deferred_flush(struct binder_proc *proc)
4780{
4781 struct rb_node *n;
4782 int wake_count = 0;
10f62861 4783
7bd7b0e6 4784 binder_inner_proc_lock(proc);
355b0502
GKH
4785 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4786 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 4787
08dabcee 4788 thread->looper_need_return = true;
355b0502
GKH
4789 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4790 wake_up_interruptible(&thread->wait);
4791 wake_count++;
4792 }
4793 }
7bd7b0e6 4794 binder_inner_proc_unlock(proc);
355b0502
GKH
4795
4796 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4797 "binder_flush: %d woke %d threads\n", proc->pid,
4798 wake_count);
4799}
4800
4801static int binder_release(struct inode *nodp, struct file *filp)
4802{
4803 struct binder_proc *proc = filp->private_data;
10f62861 4804
16b66554 4805 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
4806 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4807
4808 return 0;
4809}
4810
008fa749
ME
4811static int binder_node_release(struct binder_node *node, int refs)
4812{
4813 struct binder_ref *ref;
4814 int death = 0;
ed29721e 4815 struct binder_proc *proc = node->proc;
008fa749 4816
72196393 4817 binder_release_work(proc, &node->async_todo);
ed29721e 4818
673068ee 4819 binder_node_lock(node);
ed29721e 4820 binder_inner_proc_lock(proc);
72196393 4821 binder_dequeue_work_ilocked(&node->work);
adc18842
TK
4822 /*
4823 * The caller must have taken a temporary ref on the node.
4824 */
4825 BUG_ON(!node->tmp_refs);
4826 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
ed29721e 4827 binder_inner_proc_unlock(proc);
673068ee 4828 binder_node_unlock(node);
ed29721e 4829 binder_free_node(node);
008fa749
ME
4830
4831 return refs;
4832 }
4833
4834 node->proc = NULL;
4835 node->local_strong_refs = 0;
4836 node->local_weak_refs = 0;
ed29721e 4837 binder_inner_proc_unlock(proc);
c44b1231
TK
4838
4839 spin_lock(&binder_dead_nodes_lock);
008fa749 4840 hlist_add_head(&node->dead_node, &binder_dead_nodes);
c44b1231 4841 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
4842
4843 hlist_for_each_entry(ref, &node->refs, node_entry) {
4844 refs++;
ab51ec6b
MC
4845 /*
4846 * Need the node lock to synchronize
4847 * with new notification requests and the
4848 * inner lock to synchronize with queued
4849 * death notifications.
4850 */
4851 binder_inner_proc_lock(ref->proc);
4852 if (!ref->death) {
4853 binder_inner_proc_unlock(ref->proc);
e194fd8a 4854 continue;
ab51ec6b 4855 }
008fa749
ME
4856
4857 death++;
4858
ab51ec6b
MC
4859 BUG_ON(!list_empty(&ref->death->work.entry));
4860 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4861 binder_enqueue_work_ilocked(&ref->death->work,
4862 &ref->proc->todo);
408c68b1 4863 binder_wakeup_proc_ilocked(ref->proc);
72196393 4864 binder_inner_proc_unlock(ref->proc);
008fa749
ME
4865 }
4866
008fa749
ME
4867 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4868 "node %d now dead, refs %d, death %d\n",
4869 node->debug_id, refs, death);
673068ee 4870 binder_node_unlock(node);
adc18842 4871 binder_put_node(node);
008fa749
ME
4872
4873 return refs;
4874}
4875
355b0502
GKH
4876static void binder_deferred_release(struct binder_proc *proc)
4877{
342e5c90 4878 struct binder_context *context = proc->context;
355b0502 4879 struct rb_node *n;
19c98724 4880 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 4881
355b0502
GKH
4882 BUG_ON(proc->files);
4883
c44b1231 4884 mutex_lock(&binder_procs_lock);
355b0502 4885 hlist_del(&proc->proc_node);
c44b1231 4886 mutex_unlock(&binder_procs_lock);
53413e7d 4887
c44b1231 4888 mutex_lock(&context->context_mgr_node_lock);
342e5c90
MC
4889 if (context->binder_context_mgr_node &&
4890 context->binder_context_mgr_node->proc == proc) {
355b0502 4891 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
4892 "%s: %d context_mgr_node gone\n",
4893 __func__, proc->pid);
342e5c90 4894 context->binder_context_mgr_node = NULL;
355b0502 4895 }
c44b1231 4896 mutex_unlock(&context->context_mgr_node_lock);
7bd7b0e6 4897 binder_inner_proc_lock(proc);
7a4408c6
TK
4898 /*
4899 * Make sure proc stays alive after we
4900 * remove all the threads
4901 */
4902 proc->tmp_ref++;
355b0502 4903
7a4408c6 4904 proc->is_dead = true;
355b0502
GKH
4905 threads = 0;
4906 active_transactions = 0;
4907 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
4908 struct binder_thread *thread;
4909
4910 thread = rb_entry(n, struct binder_thread, rb_node);
7bd7b0e6 4911 binder_inner_proc_unlock(proc);
355b0502 4912 threads++;
7a4408c6 4913 active_transactions += binder_thread_release(proc, thread);
7bd7b0e6 4914 binder_inner_proc_lock(proc);
355b0502 4915 }
53413e7d 4916
355b0502
GKH
4917 nodes = 0;
4918 incoming_refs = 0;
4919 while ((n = rb_first(&proc->nodes))) {
53413e7d 4920 struct binder_node *node;
355b0502 4921
53413e7d 4922 node = rb_entry(n, struct binder_node, rb_node);
355b0502 4923 nodes++;
adc18842
TK
4924 /*
4925 * take a temporary ref on the node before
4926		 * calling binder_node_release(), which will either
4927		 * kfree() the node or call binder_put_node().
4928 */
da0fa9e4 4929 binder_inc_node_tmpref_ilocked(node);
355b0502 4930 rb_erase(&node->rb_node, &proc->nodes);
da0fa9e4 4931 binder_inner_proc_unlock(proc);
008fa749 4932 incoming_refs = binder_node_release(node, incoming_refs);
da0fa9e4 4933 binder_inner_proc_lock(proc);
355b0502 4934 }
da0fa9e4 4935 binder_inner_proc_unlock(proc);
53413e7d 4936
355b0502 4937 outgoing_refs = 0;
2c1838dc 4938 binder_proc_lock(proc);
355b0502 4939 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
4940 struct binder_ref *ref;
4941
4942 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502 4943 outgoing_refs++;
2c1838dc
TK
4944 binder_cleanup_ref_olocked(ref);
4945 binder_proc_unlock(proc);
372e3147 4946 binder_free_ref(ref);
2c1838dc 4947 binder_proc_lock(proc);
355b0502 4948 }
2c1838dc 4949 binder_proc_unlock(proc);
53413e7d 4950
72196393
TK
4951 binder_release_work(proc, &proc->todo);
4952 binder_release_work(proc, &proc->delivered_death);
355b0502 4953
355b0502 4954 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
19c98724 4955 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 4956 __func__, proc->pid, threads, nodes, incoming_refs,
19c98724 4957 outgoing_refs, active_transactions);
355b0502 4958
7a4408c6 4959 binder_proc_dec_tmpref(proc);
355b0502
GKH
4960}
4961
4962static void binder_deferred_func(struct work_struct *work)
4963{
4964 struct binder_proc *proc;
4965 struct files_struct *files;
4966
4967 int defer;
10f62861 4968
355b0502 4969 do {
355b0502
GKH
4970 mutex_lock(&binder_deferred_lock);
4971 if (!hlist_empty(&binder_deferred_list)) {
4972 proc = hlist_entry(binder_deferred_list.first,
4973 struct binder_proc, deferred_work_node);
4974 hlist_del_init(&proc->deferred_work_node);
4975 defer = proc->deferred_work;
4976 proc->deferred_work = 0;
4977 } else {
4978 proc = NULL;
4979 defer = 0;
4980 }
4981 mutex_unlock(&binder_deferred_lock);
4982
4983 files = NULL;
4984 if (defer & BINDER_DEFERRED_PUT_FILES) {
7f3dc008 4985 mutex_lock(&proc->files_lock);
355b0502
GKH
4986 files = proc->files;
4987 if (files)
4988 proc->files = NULL;
7f3dc008 4989 mutex_unlock(&proc->files_lock);
355b0502
GKH
4990 }
4991
4992 if (defer & BINDER_DEFERRED_FLUSH)
4993 binder_deferred_flush(proc);
4994
4995 if (defer & BINDER_DEFERRED_RELEASE)
4996 binder_deferred_release(proc); /* frees proc */
4997
355b0502
GKH
4998 if (files)
4999 put_files_struct(files);
5000 } while (proc);
5001}
5002static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5003
5004static void
5005binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5006{
5007 mutex_lock(&binder_deferred_lock);
5008 proc->deferred_work |= defer;
5009 if (hlist_unhashed(&proc->deferred_work_node)) {
5010 hlist_add_head(&proc->deferred_work_node,
5011 &binder_deferred_list);
1beba52d 5012 schedule_work(&binder_deferred_work);
355b0502
GKH
5013 }
5014 mutex_unlock(&binder_deferred_lock);
5015}
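/*
 * binder_deferred_func() and binder_defer_work() together implement a
 * common coalescing idiom: each proc carries a bitmask of pending
 * deferred actions, repeat requests are OR-ed in under a single lock,
 * each proc is queued at most once, and one work item drains the whole
 * list. A stripped-down sketch of the pattern with hypothetical names,
 * for illustration only:
 */
#if 0	/* illustrative sketch, not compiled */
static void defer(struct obj *o, unsigned int what)
{
	mutex_lock(&defer_lock);
	o->pending |= what;			/* coalesce repeat requests */
	if (hlist_unhashed(&o->defer_node)) {	/* queue each obj once */
		hlist_add_head(&o->defer_node, &defer_list);
		schedule_work(&defer_work);	/* one worker drains all */
	}
	mutex_unlock(&defer_lock);
}
#endif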
5016
5f2f6369
TK
5017static void print_binder_transaction_ilocked(struct seq_file *m,
5018 struct binder_proc *proc,
5019 const char *prefix,
5020 struct binder_transaction *t)
5249f488 5021{
5f2f6369
TK
5022 struct binder_proc *to_proc;
5023 struct binder_buffer *buffer = t->buffer;
5024
7a4408c6 5025 spin_lock(&t->lock);
5f2f6369 5026 to_proc = t->to_proc;
5249f488
AH
5027 seq_printf(m,
5028 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5029 prefix, t->debug_id, t,
5030 t->from ? t->from->proc->pid : 0,
5031 t->from ? t->from->pid : 0,
5f2f6369 5032 to_proc ? to_proc->pid : 0,
5249f488
AH
5033 t->to_thread ? t->to_thread->pid : 0,
5034 t->code, t->flags, t->priority, t->need_reply);
7a4408c6
TK
5035 spin_unlock(&t->lock);
5036
5f2f6369
TK
5037 if (proc != to_proc) {
5038 /*
5039 * Can only safely deref buffer if we are holding the
5040 * correct proc inner lock for this node
5041 */
5042 seq_puts(m, "\n");
5043 return;
5044 }
5045
5046 if (buffer == NULL) {
5249f488
AH
5047 seq_puts(m, " buffer free\n");
5048 return;
355b0502 5049 }
5f2f6369
TK
5050 if (buffer->target_node)
5051 seq_printf(m, " node %d", buffer->target_node->debug_id);
5249f488 5052 seq_printf(m, " size %zd:%zd data %p\n",
5f2f6369
TK
5053 buffer->data_size, buffer->offsets_size,
5054 buffer->data);
355b0502
GKH
5055}
5056
5f2f6369
TK
5057static void print_binder_work_ilocked(struct seq_file *m,
5058 struct binder_proc *proc,
5059 const char *prefix,
5060 const char *transaction_prefix,
5061 struct binder_work *w)
355b0502
GKH
5062{
5063 struct binder_node *node;
5064 struct binder_transaction *t;
5065
5066 switch (w->type) {
5067 case BINDER_WORK_TRANSACTION:
5068 t = container_of(w, struct binder_transaction, work);
5f2f6369
TK
5069 print_binder_transaction_ilocked(
5070 m, proc, transaction_prefix, t);
355b0502 5071 break;
26549d17
TK
5072 case BINDER_WORK_RETURN_ERROR: {
5073 struct binder_error *e = container_of(
5074 w, struct binder_error, work);
5075
5076 seq_printf(m, "%stransaction error: %u\n",
5077 prefix, e->cmd);
5078 } break;
355b0502 5079 case BINDER_WORK_TRANSACTION_COMPLETE:
5249f488 5080 seq_printf(m, "%stransaction complete\n", prefix);
355b0502
GKH
5081 break;
5082 case BINDER_WORK_NODE:
5083 node = container_of(w, struct binder_node, work);
da49889d
AH
5084 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5085 prefix, node->debug_id,
5086 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
5087 break;
5088 case BINDER_WORK_DEAD_BINDER:
5249f488 5089 seq_printf(m, "%shas dead binder\n", prefix);
355b0502
GKH
5090 break;
5091 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5249f488 5092 seq_printf(m, "%shas cleared dead binder\n", prefix);
355b0502
GKH
5093 break;
5094 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5249f488 5095 seq_printf(m, "%shas cleared death notification\n", prefix);
355b0502
GKH
5096 break;
5097 default:
5249f488 5098 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
355b0502
GKH
5099 break;
5100 }
355b0502
GKH
5101}
5102
72196393
TK
5103static void print_binder_thread_ilocked(struct seq_file *m,
5104 struct binder_thread *thread,
5105 int print_always)
355b0502
GKH
5106{
5107 struct binder_transaction *t;
5108 struct binder_work *w;
5249f488
AH
5109 size_t start_pos = m->count;
5110 size_t header_pos;
355b0502 5111
7a4408c6 5112 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
08dabcee 5113 thread->pid, thread->looper,
7a4408c6
TK
5114 thread->looper_need_return,
5115 atomic_read(&thread->tmp_ref));
5249f488 5116 header_pos = m->count;
355b0502
GKH
5117 t = thread->transaction_stack;
5118 while (t) {
355b0502 5119 if (t->from == thread) {
5f2f6369
TK
5120 print_binder_transaction_ilocked(m, thread->proc,
5121 " outgoing transaction", t);
355b0502
GKH
5122 t = t->from_parent;
5123 } else if (t->to_thread == thread) {
5f2f6369 5124 print_binder_transaction_ilocked(m, thread->proc,
5249f488 5125 " incoming transaction", t);
355b0502
GKH
5126 t = t->to_parent;
5127 } else {
5f2f6369
TK
5128 print_binder_transaction_ilocked(m, thread->proc,
5129 " bad transaction", t);
355b0502
GKH
5130 t = NULL;
5131 }
5132 }
5133 list_for_each_entry(w, &thread->todo, entry) {
5f2f6369 5134 print_binder_work_ilocked(m, thread->proc, " ",
72196393 5135 " pending transaction", w);
355b0502 5136 }
5249f488
AH
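	/*
	 * If nothing was printed past the thread header and this is not a
	 * verbose dump, rewind the seq_file count so the idle thread is
	 * omitted from the output entirely.
	 */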
5137 if (!print_always && m->count == header_pos)
5138 m->count = start_pos;
355b0502
GKH
5139}
5140
da0fa9e4
TK
5141static void print_binder_node_nilocked(struct seq_file *m,
5142 struct binder_node *node)
355b0502
GKH
5143{
5144 struct binder_ref *ref;
355b0502
GKH
5145 struct binder_work *w;
5146 int count;
5147
5148 count = 0;
b67bfe0d 5149 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
5150 count++;
5151
adc18842 5152 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
da49889d 5153 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
5154 node->has_strong_ref, node->has_weak_ref,
5155 node->local_strong_refs, node->local_weak_refs,
adc18842 5156 node->internal_strong_refs, count, node->tmp_refs);
355b0502 5157 if (count) {
5249f488 5158 seq_puts(m, " proc");
b67bfe0d 5159 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 5160 seq_printf(m, " %d", ref->proc->pid);
355b0502 5161 }
5249f488 5162 seq_puts(m, "\n");
72196393 5163 if (node->proc) {
72196393 5164 list_for_each_entry(w, &node->async_todo, entry)
5f2f6369 5165 print_binder_work_ilocked(m, node->proc, " ",
72196393 5166 " pending async transaction", w);
72196393 5167 }
355b0502
GKH
5168}
5169
2c1838dc
TK
5170static void print_binder_ref_olocked(struct seq_file *m,
5171 struct binder_ref *ref)
355b0502 5172{
673068ee 5173 binder_node_lock(ref->node);
372e3147
TK
5174 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5175 ref->data.debug_id, ref->data.desc,
5176 ref->node->proc ? "" : "dead ",
5177 ref->node->debug_id, ref->data.strong,
5178 ref->data.weak, ref->death);
673068ee 5179 binder_node_unlock(ref->node);
355b0502
GKH
5180}
5181
5249f488
AH
5182static void print_binder_proc(struct seq_file *m,
5183 struct binder_proc *proc, int print_all)
355b0502
GKH
5184{
5185 struct binder_work *w;
5186 struct rb_node *n;
5249f488
AH
5187 size_t start_pos = m->count;
5188 size_t header_pos;
da0fa9e4 5189 struct binder_node *last_node = NULL;
5249f488
AH
5190
5191 seq_printf(m, "proc %d\n", proc->pid);
14db3181 5192 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
5193 header_pos = m->count;
5194
72196393 5195 binder_inner_proc_lock(proc);
5249f488 5196 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
72196393 5197 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5249f488 5198 rb_node), print_all);
da0fa9e4 5199
5249f488 5200 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
5201 struct binder_node *node = rb_entry(n, struct binder_node,
5202 rb_node);
da0fa9e4
TK
5203 /*
5204 * take a temporary reference on the node so it
5205 * survives and isn't removed from the tree
5206 * while we print it.
5207 */
5208 binder_inc_node_tmpref_ilocked(node);
5209 /* Need to drop inner lock to take node lock */
5210 binder_inner_proc_unlock(proc);
5211 if (last_node)
5212 binder_put_node(last_node);
5213 binder_node_inner_lock(node);
5214 print_binder_node_nilocked(m, node);
5215 binder_node_inner_unlock(node);
5216 last_node = node;
5217 binder_inner_proc_lock(proc);
355b0502 5218 }
da0fa9e4
TK
5219 binder_inner_proc_unlock(proc);
5220 if (last_node)
5221 binder_put_node(last_node);
5222
355b0502 5223 if (print_all) {
2c1838dc 5224 binder_proc_lock(proc);
355b0502 5225 for (n = rb_first(&proc->refs_by_desc);
5249f488 5226 n != NULL;
355b0502 5227 n = rb_next(n))
2c1838dc
TK
5228 print_binder_ref_olocked(m, rb_entry(n,
5229 struct binder_ref,
5230 rb_node_desc));
5231 binder_proc_unlock(proc);
355b0502 5232 }
19c98724 5233 binder_alloc_print_allocated(m, &proc->alloc);
72196393 5234 binder_inner_proc_lock(proc);
5249f488 5235 list_for_each_entry(w, &proc->todo, entry)
5f2f6369
TK
5236 print_binder_work_ilocked(m, proc, " ",
5237 " pending transaction", w);
355b0502 5238 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 5239 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
5240 break;
5241 }
72196393 5242 binder_inner_proc_unlock(proc);
5249f488
AH
5243 if (!print_all && m->count == header_pos)
5244 m->count = start_pos;
355b0502
GKH
5245}
5246
167bccbd 5247static const char * const binder_return_strings[] = {
355b0502
GKH
5248 "BR_ERROR",
5249 "BR_OK",
5250 "BR_TRANSACTION",
5251 "BR_REPLY",
5252 "BR_ACQUIRE_RESULT",
5253 "BR_DEAD_REPLY",
5254 "BR_TRANSACTION_COMPLETE",
5255 "BR_INCREFS",
5256 "BR_ACQUIRE",
5257 "BR_RELEASE",
5258 "BR_DECREFS",
5259 "BR_ATTEMPT_ACQUIRE",
5260 "BR_NOOP",
5261 "BR_SPAWN_LOOPER",
5262 "BR_FINISHED",
5263 "BR_DEAD_BINDER",
5264 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5265 "BR_FAILED_REPLY"
5266};
5267
167bccbd 5268static const char * const binder_command_strings[] = {
355b0502
GKH
5269 "BC_TRANSACTION",
5270 "BC_REPLY",
5271 "BC_ACQUIRE_RESULT",
5272 "BC_FREE_BUFFER",
5273 "BC_INCREFS",
5274 "BC_ACQUIRE",
5275 "BC_RELEASE",
5276 "BC_DECREFS",
5277 "BC_INCREFS_DONE",
5278 "BC_ACQUIRE_DONE",
5279 "BC_ATTEMPT_ACQUIRE",
5280 "BC_REGISTER_LOOPER",
5281 "BC_ENTER_LOOPER",
5282 "BC_EXIT_LOOPER",
5283 "BC_REQUEST_DEATH_NOTIFICATION",
5284 "BC_CLEAR_DEATH_NOTIFICATION",
7980240b
MC
5285 "BC_DEAD_BINDER_DONE",
5286 "BC_TRANSACTION_SG",
5287 "BC_REPLY_SG",
355b0502
GKH
5288};
5289
167bccbd 5290static const char * const binder_objstat_strings[] = {
355b0502
GKH
5291 "proc",
5292 "thread",
5293 "node",
5294 "ref",
5295 "death",
5296 "transaction",
5297 "transaction_complete"
5298};
5299
5249f488
AH
5300static void print_binder_stats(struct seq_file *m, const char *prefix,
5301 struct binder_stats *stats)
355b0502
GKH
5302{
5303 int i;
5304
5305 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 5306 ARRAY_SIZE(binder_command_strings));
355b0502 5307 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
0953c797
BJS
5308 int temp = atomic_read(&stats->bc[i]);
5309
5310 if (temp)
5249f488 5311 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 5312 binder_command_strings[i], temp);
355b0502
GKH
5313 }
5314
5315 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 5316 ARRAY_SIZE(binder_return_strings));
355b0502 5317 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
0953c797
BJS
5318 int temp = atomic_read(&stats->br[i]);
5319
5320 if (temp)
5249f488 5321 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 5322 binder_return_strings[i], temp);
355b0502
GKH
5323 }
5324
5325 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 5326 ARRAY_SIZE(binder_objstat_strings));
355b0502 5327 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 5328 ARRAY_SIZE(stats->obj_deleted));
355b0502 5329 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
0953c797
BJS
5330 int created = atomic_read(&stats->obj_created[i]);
5331 int deleted = atomic_read(&stats->obj_deleted[i]);
5332
5333 if (created || deleted)
5334 seq_printf(m, "%s%s: active %d total %d\n",
5335 prefix,
5249f488 5336 binder_objstat_strings[i],
0953c797
BJS
5337 created - deleted,
5338 created);
355b0502 5339 }
355b0502
GKH
5340}
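/*
 * Sample of what the loops above emit (counts invented for illustration;
 * the prefix is "" for the global stats and " " for per-proc stats):
 *
 *   BC_TRANSACTION: 381
 *   BR_TRANSACTION_COMPLETE: 381
 *   proc: active 4 total 11
 */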
5341
5249f488
AH
5342static void print_binder_proc_stats(struct seq_file *m,
5343 struct binder_proc *proc)
355b0502
GKH
5344{
5345 struct binder_work *w;
1b77e9dc 5346 struct binder_thread *thread;
355b0502 5347 struct rb_node *n;
1b77e9dc 5348 int count, strong, weak, ready_threads;
7bd7b0e6
TK
5349 size_t free_async_space =
5350 binder_alloc_get_free_async_space(&proc->alloc);
355b0502 5351
5249f488 5352 seq_printf(m, "proc %d\n", proc->pid);
14db3181 5353 seq_printf(m, "context %s\n", proc->context->name);
355b0502 5354 count = 0;
1b77e9dc 5355 ready_threads = 0;
7bd7b0e6 5356 binder_inner_proc_lock(proc);
355b0502
GKH
5357 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5358 count++;
1b77e9dc
MC
5359
5360 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5361 ready_threads++;
5362
5249f488
AH
5363 seq_printf(m, " threads: %d\n", count);
5364 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
5365 " ready threads %d\n"
5366 " free async space %zd\n", proc->requested_threads,
5367 proc->requested_threads_started, proc->max_threads,
1b77e9dc 5368 ready_threads,
7bd7b0e6 5369 free_async_space);
355b0502
GKH
5370 count = 0;
5371 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5372 count++;
da0fa9e4 5373 binder_inner_proc_unlock(proc);
5249f488 5374 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
5375 count = 0;
5376 strong = 0;
5377 weak = 0;
2c1838dc 5378 binder_proc_lock(proc);
355b0502
GKH
5379 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5380 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5381 rb_node_desc);
5382 count++;
372e3147
TK
5383 strong += ref->data.strong;
5384 weak += ref->data.weak;
355b0502 5385 }
2c1838dc 5386 binder_proc_unlock(proc);
5249f488 5387 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 5388
19c98724 5389 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 5390 seq_printf(m, " buffers: %d\n", count);
355b0502 5391
8ef4665a
SY
5392 binder_alloc_print_pages(m, &proc->alloc);
5393
355b0502 5394 count = 0;
72196393 5395 binder_inner_proc_lock(proc);
355b0502 5396 list_for_each_entry(w, &proc->todo, entry) {
72196393 5397 if (w->type == BINDER_WORK_TRANSACTION)
355b0502 5398 count++;
355b0502 5399 }
72196393 5400 binder_inner_proc_unlock(proc);
5249f488 5401 seq_printf(m, " pending transactions: %d\n", count);
355b0502 5402
5249f488 5403 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
5404}
5405
5406
5249f488 5407static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
5408{
5409 struct binder_proc *proc;
355b0502 5410 struct binder_node *node;
673068ee 5411 struct binder_node *last_node = NULL;
355b0502 5412
5249f488 5413 seq_puts(m, "binder state:\n");
355b0502 5414
c44b1231 5415 spin_lock(&binder_dead_nodes_lock);
355b0502 5416 if (!hlist_empty(&binder_dead_nodes))
5249f488 5417 seq_puts(m, "dead nodes:\n");
673068ee
TK
5418 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5419 /*
5420 * take a temporary reference on the node so it
5421 * survives and isn't removed from the list
5422 * while we print it.
5423 */
5424 node->tmp_refs++;
5425 spin_unlock(&binder_dead_nodes_lock);
5426 if (last_node)
5427 binder_put_node(last_node);
5428 binder_node_lock(node);
da0fa9e4 5429 print_binder_node_nilocked(m, node);
673068ee
TK
5430 binder_node_unlock(node);
5431 last_node = node;
5432 spin_lock(&binder_dead_nodes_lock);
5433 }
c44b1231 5434 spin_unlock(&binder_dead_nodes_lock);
673068ee
TK
5435 if (last_node)
5436 binder_put_node(last_node);
355b0502 5437
c44b1231 5438 mutex_lock(&binder_procs_lock);
b67bfe0d 5439 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 5440 print_binder_proc(m, proc, 1);
c44b1231 5441 mutex_unlock(&binder_procs_lock);
a60b890f 5442
5249f488 5443 return 0;
355b0502
GKH
5444}
5445
5249f488 5446static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
5447{
5448 struct binder_proc *proc;
355b0502 5449
5249f488 5450 seq_puts(m, "binder stats:\n");
355b0502 5451
5249f488 5452 print_binder_stats(m, "", &binder_stats);
355b0502 5453
c44b1231 5454 mutex_lock(&binder_procs_lock);
b67bfe0d 5455 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 5456 print_binder_proc_stats(m, proc);
c44b1231 5457 mutex_unlock(&binder_procs_lock);
a60b890f 5458
5249f488 5459 return 0;
355b0502
GKH
5460}
5461
5249f488 5462static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
5463{
5464 struct binder_proc *proc;
355b0502 5465
5249f488 5466 seq_puts(m, "binder transactions:\n");
c44b1231 5467 mutex_lock(&binder_procs_lock);
b67bfe0d 5468 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 5469 print_binder_proc(m, proc, 0);
c44b1231 5470 mutex_unlock(&binder_procs_lock);
a60b890f 5471
5249f488 5472 return 0;
355b0502
GKH
5473}
5474
5249f488 5475static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 5476{
83050a4e 5477 struct binder_proc *itr;
14db3181 5478 int pid = (unsigned long)m->private;
355b0502 5479
c44b1231 5480 mutex_lock(&binder_procs_lock);
83050a4e 5481 hlist_for_each_entry(itr, &binder_procs, proc_node) {
14db3181
MC
5482 if (itr->pid == pid) {
5483 seq_puts(m, "binder proc state:\n");
5484 print_binder_proc(m, itr, 1);
83050a4e
RA
5485 }
5486 }
c44b1231
TK
5487 mutex_unlock(&binder_procs_lock);
5488
5249f488 5489 return 0;
355b0502
GKH
5490}
5491
5249f488 5492static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
5493 struct binder_transaction_log_entry *e)
5494{
d99c7333
TK
5495 int debug_id = READ_ONCE(e->debug_id_done);
5496 /*
5497 * read barrier to guarantee debug_id_done read before
5498 * we print the log values
5499 */
5500 smp_rmb();
5249f488 5501 seq_printf(m,
d99c7333 5502 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
5503 e->debug_id, (e->call_type == 2) ? "reply" :
5504 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
14db3181 5505 e->from_thread, e->to_proc, e->to_thread, e->context_name,
57ada2fb
TK
5506 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5507 e->return_error, e->return_error_param,
5508 e->return_error_line);
d99c7333
TK
5509 /*
5510 * read-barrier to guarantee read of debug_id_done after
5511 * done printing the fields of the entry
5512 */
5513 smp_rmb();
5514 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5515 "\n" : " (incomplete)\n");
355b0502
GKH
5516}
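/*
 * The paired smp_rmb()s above form the read side of a seqlock-like
 * scheme: the writer is assumed to publish a nonzero debug_id_done only
 * after fully filling in the entry (behind a matching write barrier), so
 * if debug_id_done reads back the same nonzero value after the field
 * loads, the snapshot is consistent; otherwise it is flagged
 * "(incomplete)".
 */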
5517
5249f488 5518static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 5519{
5249f488 5520 struct binder_transaction_log *log = m->private;
d99c7333
TK
5521 unsigned int log_cur = atomic_read(&log->cur);
5522 unsigned int count;
5523 unsigned int cur;
355b0502 5524 int i;
355b0502 5525
d99c7333
TK
5526 count = log_cur + 1;
5527 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5528 0 : count % ARRAY_SIZE(log->entry);
5529 if (count > ARRAY_SIZE(log->entry) || log->full)
5530 count = ARRAY_SIZE(log->entry);
5531 for (i = 0; i < count; i++) {
5532 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5533
5534 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 5535 }
5249f488 5536 return 0;
355b0502
GKH
5537}
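/*
 * Worked example of the cursor math above, assuming the 32-entry ring
 * this driver uses: with log->full set and log->cur reading 40, count
 * starts at 41, cur becomes 41 % 32 = 9, and count is clamped to 32, so
 * the loop prints indices 9, 10, ..., 31, 0, ..., 8, i.e. the 32 most
 * recent entries in oldest-to-newest order. Before the ring first wraps,
 * cur stays 0 and only the slots written so far are printed.
 */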
5538
5539static const struct file_operations binder_fops = {
5540 .owner = THIS_MODULE,
5541 .poll = binder_poll,
5542 .unlocked_ioctl = binder_ioctl,
da49889d 5543 .compat_ioctl = binder_ioctl,
355b0502
GKH
5544 .mmap = binder_mmap,
5545 .open = binder_open,
5546 .flush = binder_flush,
5547 .release = binder_release,
5548};
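/*
 * compat_ioctl can reuse binder_ioctl directly because the binder UAPI
 * structures are declared with fixed-width types (binder_uintptr_t,
 * binder_size_t, __u32/__u64), so 32-bit and 64-bit userspace see the
 * same layout.
 */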
5549
5249f488
AH
5550BINDER_DEBUG_ENTRY(state);
5551BINDER_DEBUG_ENTRY(stats);
5552BINDER_DEBUG_ENTRY(transactions);
5553BINDER_DEBUG_ENTRY(transaction_log);
5554
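/*
 * BINDER_DEBUG_ENTRY(name) is defined earlier in this file; roughly, it
 * wraps binder_<name>_show() in a binder_<name>_open() via single_open()
 * and emits a matching binder_<name>_fops. A paraphrased sketch of what
 * BINDER_DEBUG_ENTRY(state) expands to (not the verbatim macro):
 */
#if 0	/* illustrative expansion */
static int binder_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, binder_state_show, inode->i_private);
}

static const struct file_operations binder_state_fops = {
	.owner		= THIS_MODULE,
	.open		= binder_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif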
ac4812c5
MC
5555static int __init init_binder_device(const char *name)
5556{
5557 int ret;
5558 struct binder_device *binder_device;
5559
5560 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5561 if (!binder_device)
5562 return -ENOMEM;
5563
5564 binder_device->miscdev.fops = &binder_fops;
5565 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5566 binder_device->miscdev.name = name;
5567
5568 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5569 binder_device->context.name = name;
c44b1231 5570 mutex_init(&binder_device->context.context_mgr_node_lock);
ac4812c5
MC
5571
5572 ret = misc_register(&binder_device->miscdev);
5573 if (ret < 0) {
5574 kfree(binder_device);
5575 return ret;
5576 }
5577
5578 hlist_add_head(&binder_device->hlist, &binder_devices);
5579
5580 return ret;
5581}
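/*
 * Each name registered here becomes its own misc character device (e.g.
 * /dev/binder) with its own binder context. The name list comes from the
 * "devices" module parameter parsed in binder_init() below; a typical
 * Android setup (an example, not a requirement) would boot with:
 *
 *   binder.devices=binder,hwbinder,vndbinder
 */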
5582
355b0502
GKH
5583static int __init binder_init(void)
5584{
5585 int ret;
22eb9476 5586 char *device_name, *device_names, *device_tmp;
ac4812c5
MC
5587 struct binder_device *device;
5588 struct hlist_node *tmp;
355b0502 5589
533dfb25
TH
5590 ret = binder_alloc_shrinker_init();
5591 if (ret)
5592 return ret;
f2517eb7 5593
d99c7333
TK
5594 atomic_set(&binder_transaction_log.cur, ~0U);
5595 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5596
16b66554
AH
5597 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5598 if (binder_debugfs_dir_entry_root)
5599 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5600 binder_debugfs_dir_entry_root);
ac4812c5 5601
16b66554
AH
5602 if (binder_debugfs_dir_entry_root) {
5603 debugfs_create_file("state",
5604 S_IRUGO,
5605 binder_debugfs_dir_entry_root,
5606 NULL,
5607 &binder_state_fops);
5608 debugfs_create_file("stats",
5609 S_IRUGO,
5610 binder_debugfs_dir_entry_root,
5611 NULL,
5612 &binder_stats_fops);
5613 debugfs_create_file("transactions",
5614 S_IRUGO,
5615 binder_debugfs_dir_entry_root,
5616 NULL,
5617 &binder_transactions_fops);
5618 debugfs_create_file("transaction_log",
5619 S_IRUGO,
5620 binder_debugfs_dir_entry_root,
5621 &binder_transaction_log,
5622 &binder_transaction_log_fops);
5623 debugfs_create_file("failed_transaction_log",
5624 S_IRUGO,
5625 binder_debugfs_dir_entry_root,
5626 &binder_transaction_log_failed,
5627 &binder_transaction_log_fops);
355b0502 5628 }
ac4812c5
MC
5629
5630 /*
5631	 * Copy the module parameter string, because we don't want to
5632 * tokenize it in-place.
5633 */
5634 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5635 if (!device_names) {
5636 ret = -ENOMEM;
5637 goto err_alloc_device_names_failed;
5638 }
5639 strcpy(device_names, binder_devices_param);
5640
22eb9476
CB
5641 device_tmp = device_names;
5642 while ((device_name = strsep(&device_tmp, ","))) {
ac4812c5
MC
5643 ret = init_binder_device(device_name);
5644 if (ret)
5645 goto err_init_binder_device_failed;
5646 }
5647
5648 return ret;
5649
5650err_init_binder_device_failed:
5651 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5652 misc_deregister(&device->miscdev);
5653 hlist_del(&device->hlist);
5654 kfree(device);
5655 }
22eb9476
CB
5656
5657 kfree(device_names);
5658
ac4812c5
MC
5659err_alloc_device_names_failed:
5660 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5661
355b0502
GKH
5662 return ret;
5663}
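/*
 * The files registered above land under <debugfs>/binder/. Assuming
 * debugfs is mounted at the conventional /sys/kernel/debug, a minimal
 * userspace reader (illustrative only, not kernel code) looks like:
 */
#if 0	/* build as an ordinary userspace program */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/sys/kernel/debug/binder/state", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif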
5664
5665device_initcall(binder_init);
5666
975a1ac9
AH
5667#define CREATE_TRACE_POINTS
5668#include "binder_trace.h"
5669
355b0502 5670MODULE_LICENSE("GPL v2");