]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/android/binder.c
binder: use inner lock to sync work dq and node counts
[mirror_ubuntu-bionic-kernel.git] / drivers / android / binder.c
CommitLineData
355b0502
GKH
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
9630fe88
TK
18/*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->nodes) and all todo lists associated
32 * with the binder_proc (proc->todo, thread->todo,
33 * proc->delivered_death and node->async_todo).
34 * binder_inner_proc_lock() and binder_inner_proc_unlock()
35 * are used to acq/rel
36 *
37 * Any lock under procA must never be nested under any lock at the same
38 * level or below on procB.
39 *
40 * Functions that require a lock held on entry indicate which lock
41 * in the suffix of the function name:
42 *
43 * foo_olocked() : requires node->outer_lock
44 * foo_nlocked() : requires node->lock
45 * foo_ilocked() : requires proc->inner_lock
46 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
47 * foo_nilocked(): requires node->lock and proc->inner_lock
48 * ...
49 */
50
56b468fc
AS
51#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
355b0502
GKH
53#include <asm/cacheflush.h>
54#include <linux/fdtable.h>
55#include <linux/file.h>
e2610b26 56#include <linux/freezer.h>
355b0502
GKH
57#include <linux/fs.h>
58#include <linux/list.h>
59#include <linux/miscdevice.h>
355b0502
GKH
60#include <linux/module.h>
61#include <linux/mutex.h>
62#include <linux/nsproxy.h>
63#include <linux/poll.h>
16b66554 64#include <linux/debugfs.h>
355b0502 65#include <linux/rbtree.h>
3f07c014 66#include <linux/sched/signal.h>
6e84f315 67#include <linux/sched/mm.h>
5249f488 68#include <linux/seq_file.h>
355b0502 69#include <linux/uaccess.h>
17cf22c3 70#include <linux/pid_namespace.h>
79af7307 71#include <linux/security.h>
9630fe88 72#include <linux/spinlock.h>
355b0502 73
9246a4a9
GKH
74#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
75#define BINDER_IPC_32BIT 1
76#endif
77
78#include <uapi/linux/android/binder.h>
0c972a05 79#include "binder_alloc.h"
975a1ac9 80#include "binder_trace.h"
355b0502 81
975a1ac9 82static DEFINE_MUTEX(binder_main_lock);
c44b1231
TK
83
84static HLIST_HEAD(binder_deferred_list);
355b0502
GKH
85static DEFINE_MUTEX(binder_deferred_lock);
86
ac4812c5 87static HLIST_HEAD(binder_devices);
355b0502 88static HLIST_HEAD(binder_procs);
c44b1231
TK
89static DEFINE_MUTEX(binder_procs_lock);
90
355b0502 91static HLIST_HEAD(binder_dead_nodes);
c44b1231 92static DEFINE_SPINLOCK(binder_dead_nodes_lock);
355b0502 93
16b66554
AH
94static struct dentry *binder_debugfs_dir_entry_root;
95static struct dentry *binder_debugfs_dir_entry_proc;
656a800a 96static atomic_t binder_last_id;
355b0502 97
5249f488
AH
98#define BINDER_DEBUG_ENTRY(name) \
99static int binder_##name##_open(struct inode *inode, struct file *file) \
100{ \
16b66554 101 return single_open(file, binder_##name##_show, inode->i_private); \
5249f488
AH
102} \
103\
104static const struct file_operations binder_##name##_fops = { \
105 .owner = THIS_MODULE, \
106 .open = binder_##name##_open, \
107 .read = seq_read, \
108 .llseek = seq_lseek, \
109 .release = single_release, \
110}
111
112static int binder_proc_show(struct seq_file *m, void *unused);
113BINDER_DEBUG_ENTRY(proc);
355b0502
GKH
114
115/* This is only defined in include/asm-arm/sizes.h */
116#ifndef SZ_1K
117#define SZ_1K 0x400
118#endif
119
120#ifndef SZ_4M
121#define SZ_4M 0x400000
122#endif
123
124#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
125
126#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
127
128enum {
129 BINDER_DEBUG_USER_ERROR = 1U << 0,
130 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
131 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
132 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
133 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
134 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
135 BINDER_DEBUG_READ_WRITE = 1U << 6,
136 BINDER_DEBUG_USER_REFS = 1U << 7,
137 BINDER_DEBUG_THREADS = 1U << 8,
138 BINDER_DEBUG_TRANSACTION = 1U << 9,
139 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
140 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
141 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
19c98724 142 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
9630fe88 143 BINDER_DEBUG_SPINLOCKS = 1U << 14,
355b0502
GKH
144};
145static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
146 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
147module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
148
ac4812c5
MC
149static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
150module_param_named(devices, binder_devices_param, charp, 0444);
151
355b0502
GKH
152static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
153static int binder_stop_on_user_error;
154
/*
 * Setter for the stop_on_user_error module parameter. After the value is
 * updated, wake any task blocked in binder_user_error_wait so it can
 * re-check the flag (only while the flag is below the "stopped" value 2).
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
165module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
166 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
167
168#define binder_debug(mask, x...) \
169 do { \
170 if (binder_debug_mask & mask) \
258767fe 171 pr_info(x); \
355b0502
GKH
172 } while (0)
173
174#define binder_user_error(x...) \
175 do { \
176 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
258767fe 177 pr_info(x); \
355b0502
GKH
178 if (binder_stop_on_user_error) \
179 binder_stop_on_user_error = 2; \
180 } while (0)
181
feba3900
MC
182#define to_flat_binder_object(hdr) \
183 container_of(hdr, struct flat_binder_object, hdr)
184
185#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
186
7980240b
MC
187#define to_binder_buffer_object(hdr) \
188 container_of(hdr, struct binder_buffer_object, hdr)
189
def95c73
MC
190#define to_binder_fd_array_object(hdr) \
191 container_of(hdr, struct binder_fd_array_object, hdr)
192
355b0502
GKH
193enum binder_stat_types {
194 BINDER_STAT_PROC,
195 BINDER_STAT_THREAD,
196 BINDER_STAT_NODE,
197 BINDER_STAT_REF,
198 BINDER_STAT_DEATH,
199 BINDER_STAT_TRANSACTION,
200 BINDER_STAT_TRANSACTION_COMPLETE,
201 BINDER_STAT_COUNT
202};
203
204struct binder_stats {
0953c797
BJS
205 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
206 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
207 atomic_t obj_created[BINDER_STAT_COUNT];
208 atomic_t obj_deleted[BINDER_STAT_COUNT];
355b0502
GKH
209};
210
211static struct binder_stats binder_stats;
212
/* Account one freed object of @type in the global binder statistics. */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}
217
/* Account one created object of @type in the global binder statistics. */
static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
222
223struct binder_transaction_log_entry {
224 int debug_id;
d99c7333 225 int debug_id_done;
355b0502
GKH
226 int call_type;
227 int from_proc;
228 int from_thread;
229 int target_handle;
230 int to_proc;
231 int to_thread;
232 int to_node;
233 int data_size;
234 int offsets_size;
57ada2fb
TK
235 int return_error_line;
236 uint32_t return_error;
237 uint32_t return_error_param;
14db3181 238 const char *context_name;
355b0502
GKH
239};
240struct binder_transaction_log {
d99c7333
TK
241 atomic_t cur;
242 bool full;
355b0502
GKH
243 struct binder_transaction_log_entry entry[32];
244};
245static struct binder_transaction_log binder_transaction_log;
246static struct binder_transaction_log binder_transaction_log_failed;
247
/*
 * Reserve and return the next slot in the transaction log ring buffer.
 * Lock-free: the slot index comes from an atomic increment of log->cur,
 * so concurrent callers always get distinct entries.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * memset() zeroes the other fields.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
267
342e5c90
MC
268struct binder_context {
269 struct binder_node *binder_context_mgr_node;
c44b1231
TK
270 struct mutex context_mgr_node_lock;
271
342e5c90 272 kuid_t binder_context_mgr_uid;
14db3181 273 const char *name;
342e5c90
MC
274};
275
ac4812c5
MC
276struct binder_device {
277 struct hlist_node hlist;
278 struct miscdevice miscdev;
279 struct binder_context context;
342e5c90
MC
280};
281
355b0502
GKH
282struct binder_work {
283 struct list_head entry;
284 enum {
285 BINDER_WORK_TRANSACTION = 1,
286 BINDER_WORK_TRANSACTION_COMPLETE,
26549d17 287 BINDER_WORK_RETURN_ERROR,
355b0502
GKH
288 BINDER_WORK_NODE,
289 BINDER_WORK_DEAD_BINDER,
290 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
291 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
292 } type;
293};
294
26549d17
TK
295struct binder_error {
296 struct binder_work work;
297 uint32_t cmd;
298};
299
9630fe88
TK
300/**
301 * struct binder_node - binder node bookkeeping
302 * @debug_id: unique ID for debugging
303 * (invariant after initialized)
304 * @lock: lock for node fields
305 * @work: worklist element for node work
306 * @rb_node: element for proc->nodes tree
307 * @dead_node: element for binder_dead_nodes list
308 * (protected by binder_dead_nodes_lock)
309 * @proc: binder_proc that owns this node
310 * (invariant after initialized)
311 * @refs: list of references on this node
312 * @internal_strong_refs: used to take strong references when
313 * initiating a transaction
ed29721e
TK
314 * (protected by @proc->inner_lock if @proc
315 * and by @lock)
9630fe88 316 * @local_weak_refs: weak user refs from local process
ed29721e
TK
317 * (protected by @proc->inner_lock if @proc
318 * and by @lock)
9630fe88 319 * @local_strong_refs: strong user refs from local process
ed29721e
TK
320 * (protected by @proc->inner_lock if @proc
321 * and by @lock)
9630fe88 322 * @tmp_refs: temporary kernel refs
ed29721e
TK
323 * (protected by @proc->inner_lock while @proc
324 * is valid, and by binder_dead_nodes_lock
325 * if @proc is NULL. During inc/dec and node release
326 * it is also protected by @lock to provide safety
327 * as the node dies and @proc becomes NULL)
9630fe88
TK
328 * @ptr: userspace pointer for node
329 * (invariant, no lock needed)
330 * @cookie: userspace cookie for node
331 * (invariant, no lock needed)
332 * @has_strong_ref: userspace notified of strong ref
ed29721e
TK
333 * (protected by @proc->inner_lock if @proc
334 * and by @lock)
9630fe88 335 * @pending_strong_ref: userspace has acked notification of strong ref
ed29721e
TK
336 * (protected by @proc->inner_lock if @proc
337 * and by @lock)
9630fe88 338 * @has_weak_ref: userspace notified of weak ref
ed29721e
TK
339 * (protected by @proc->inner_lock if @proc
340 * and by @lock)
9630fe88 341 * @pending_weak_ref: userspace has acked notification of weak ref
ed29721e
TK
342 * (protected by @proc->inner_lock if @proc
343 * and by @lock)
9630fe88
TK
344 * @has_async_transaction: async transaction to node in progress
345 * @accept_fds: file descriptor operations supported for node
346 * (invariant after initialized)
347 * @min_priority: minimum scheduling priority
348 * (invariant after initialized)
349 * @async_todo: list of async work items
350 *
351 * Bookkeeping structure for binder nodes.
352 */
355b0502
GKH
353struct binder_node {
354 int debug_id;
9630fe88 355 spinlock_t lock;
355b0502
GKH
356 struct binder_work work;
357 union {
358 struct rb_node rb_node;
359 struct hlist_node dead_node;
360 };
361 struct binder_proc *proc;
362 struct hlist_head refs;
363 int internal_strong_refs;
364 int local_weak_refs;
365 int local_strong_refs;
adc18842 366 int tmp_refs;
da49889d
AH
367 binder_uintptr_t ptr;
368 binder_uintptr_t cookie;
ed29721e
TK
369 struct {
370 /*
371 * bitfield elements protected by
372 * proc inner_lock
373 */
374 u8 has_strong_ref:1;
375 u8 pending_strong_ref:1;
376 u8 has_weak_ref:1;
377 u8 pending_weak_ref:1;
378 };
379 struct {
380 /*
381 * invariant after initialization
382 */
383 u8 accept_fds:1;
384 u8 min_priority;
385 };
386 bool has_async_transaction;
355b0502
GKH
387 struct list_head async_todo;
388};
389
390struct binder_ref_death {
391 struct binder_work work;
da49889d 392 binder_uintptr_t cookie;
355b0502
GKH
393};
394
372e3147
TK
395/**
396 * struct binder_ref_data - binder_ref counts and id
397 * @debug_id: unique ID for the ref
398 * @desc: unique userspace handle for ref
399 * @strong: strong ref count (debugging only if not locked)
400 * @weak: weak ref count (debugging only if not locked)
401 *
402 * Structure to hold ref count and ref id information. Since
403 * the actual ref can only be accessed with a lock, this structure
404 * is used to return information about the ref to callers of
405 * ref inc/dec functions.
406 */
407struct binder_ref_data {
408 int debug_id;
409 uint32_t desc;
410 int strong;
411 int weak;
412};
413
414/**
415 * struct binder_ref - struct to track references on nodes
416 * @data: binder_ref_data containing id, handle, and current refcounts
417 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
418 * @rb_node_node: node for lookup by @node in proc's rb_tree
419 * @node_entry: list entry for node->refs list in target node
420 * @proc: binder_proc containing ref
421 * @node: binder_node of target node. When cleaning up a
422 * ref for deletion in binder_cleanup_ref, a non-NULL
423 * @node indicates the node must be freed
424 * @death: pointer to death notification (ref_death) if requested
425 *
426 * Structure to track references from procA to target node (on procB). This
427 * structure is unsafe to access without holding @proc->outer_lock.
428 */
355b0502
GKH
429struct binder_ref {
430 /* Lookups needed: */
431 /* node + proc => ref (transaction) */
432 /* desc + proc => ref (transaction, inc/dec ref) */
433 /* node => refs + procs (proc exit) */
372e3147 434 struct binder_ref_data data;
355b0502
GKH
435 struct rb_node rb_node_desc;
436 struct rb_node rb_node_node;
437 struct hlist_node node_entry;
438 struct binder_proc *proc;
439 struct binder_node *node;
355b0502
GKH
440 struct binder_ref_death *death;
441};
442
355b0502
GKH
443enum binder_deferred_state {
444 BINDER_DEFERRED_PUT_FILES = 0x01,
445 BINDER_DEFERRED_FLUSH = 0x02,
446 BINDER_DEFERRED_RELEASE = 0x04,
447};
448
9630fe88
TK
449/**
450 * struct binder_proc - binder process bookkeeping
451 * @proc_node: element for binder_procs list
452 * @threads: rbtree of binder_threads in this proc
453 * @nodes: rbtree of binder nodes associated with
454 * this proc ordered by node->ptr
455 * @refs_by_desc: rbtree of refs ordered by ref->desc
456 * @refs_by_node: rbtree of refs ordered by ref->node
 457 * @pid: PID of group_leader of process
458 * (invariant after initialized)
459 * @tsk task_struct for group_leader of process
460 * (invariant after initialized)
461 * @files files_struct for process
462 * (invariant after initialized)
463 * @deferred_work_node: element for binder_deferred_list
464 * (protected by binder_deferred_lock)
465 * @deferred_work: bitmap of deferred work to perform
466 * (protected by binder_deferred_lock)
467 * @is_dead: process is dead and awaiting free
468 * when outstanding transactions are cleaned up
469 * @todo: list of work for this process
470 * @wait: wait queue head to wait for proc work
471 * (invariant after initialized)
472 * @stats: per-process binder statistics
473 * (atomics, no lock needed)
474 * @delivered_death: list of delivered death notification
475 * @max_threads: cap on number of binder threads
476 * @requested_threads: number of binder threads requested but not
477 * yet started. In current implementation, can
478 * only be 0 or 1.
479 * @requested_threads_started: number binder threads started
480 * @ready_threads: number of threads waiting for proc work
481 * @tmp_ref: temporary reference to indicate proc is in use
482 * @default_priority: default scheduler priority
483 * (invariant after initialized)
484 * @debugfs_entry: debugfs node
485 * @alloc: binder allocator bookkeeping
486 * @context: binder_context for this proc
487 * (invariant after initialized)
488 * @inner_lock: can nest under outer_lock and/or node lock
 489 * @outer_lock: no nesting under inner or node lock
490 * Lock order: 1) outer, 2) node, 3) inner
491 *
492 * Bookkeeping structure for binder processes
493 */
355b0502
GKH
494struct binder_proc {
495 struct hlist_node proc_node;
496 struct rb_root threads;
497 struct rb_root nodes;
498 struct rb_root refs_by_desc;
499 struct rb_root refs_by_node;
500 int pid;
355b0502
GKH
501 struct task_struct *tsk;
502 struct files_struct *files;
503 struct hlist_node deferred_work_node;
504 int deferred_work;
7a4408c6 505 bool is_dead;
355b0502 506
355b0502
GKH
507 struct list_head todo;
508 wait_queue_head_t wait;
509 struct binder_stats stats;
510 struct list_head delivered_death;
511 int max_threads;
512 int requested_threads;
513 int requested_threads_started;
514 int ready_threads;
7a4408c6 515 int tmp_ref;
355b0502 516 long default_priority;
16b66554 517 struct dentry *debugfs_entry;
fdfb4a99 518 struct binder_alloc alloc;
342e5c90 519 struct binder_context *context;
9630fe88
TK
520 spinlock_t inner_lock;
521 spinlock_t outer_lock;
355b0502
GKH
522};
523
524enum {
525 BINDER_LOOPER_STATE_REGISTERED = 0x01,
526 BINDER_LOOPER_STATE_ENTERED = 0x02,
527 BINDER_LOOPER_STATE_EXITED = 0x04,
528 BINDER_LOOPER_STATE_INVALID = 0x08,
529 BINDER_LOOPER_STATE_WAITING = 0x10,
355b0502
GKH
530};
531
9630fe88
TK
532/**
533 * struct binder_thread - binder thread bookkeeping
534 * @proc: binder process for this thread
535 * (invariant after initialization)
536 * @rb_node: element for proc->threads rbtree
537 * @pid: PID for this thread
538 * (invariant after initialization)
539 * @looper: bitmap of looping state
540 * (only accessed by this thread)
 541 * @looper_need_return: looping thread needs to exit driver
542 * (no lock needed)
543 * @transaction_stack: stack of in-progress transactions for this thread
544 * @todo: list of work to do for this thread
545 * @return_error: transaction errors reported by this thread
546 * (only accessed by this thread)
547 * @reply_error: transaction errors reported by target thread
548 * @wait: wait queue for thread work
549 * @stats: per-thread statistics
550 * (atomics, no lock needed)
551 * @tmp_ref: temporary reference to indicate thread is in use
552 * (atomic since @proc->inner_lock cannot
553 * always be acquired)
554 * @is_dead: thread is dead and awaiting free
555 * when outstanding transactions are cleaned up
556 *
557 * Bookkeeping structure for binder threads.
558 */
355b0502
GKH
559struct binder_thread {
560 struct binder_proc *proc;
561 struct rb_node rb_node;
562 int pid;
08dabcee
TK
563 int looper; /* only modified by this thread */
564 bool looper_need_return; /* can be written by other thread */
355b0502
GKH
565 struct binder_transaction *transaction_stack;
566 struct list_head todo;
26549d17
TK
567 struct binder_error return_error;
568 struct binder_error reply_error;
355b0502
GKH
569 wait_queue_head_t wait;
570 struct binder_stats stats;
7a4408c6
TK
571 atomic_t tmp_ref;
572 bool is_dead;
355b0502
GKH
573};
574
575struct binder_transaction {
576 int debug_id;
577 struct binder_work work;
578 struct binder_thread *from;
579 struct binder_transaction *from_parent;
580 struct binder_proc *to_proc;
581 struct binder_thread *to_thread;
582 struct binder_transaction *to_parent;
583 unsigned need_reply:1;
584 /* unsigned is_dead:1; */ /* not used at the moment */
585
586 struct binder_buffer *buffer;
587 unsigned int code;
588 unsigned int flags;
589 long priority;
590 long saved_priority;
4a2ebb93 591 kuid_t sender_euid;
7a4408c6
TK
592 /**
593 * @lock: protects @from, @to_proc, and @to_thread
594 *
595 * @from, @to_proc, and @to_thread can be set to NULL
596 * during thread teardown
597 */
598 spinlock_t lock;
355b0502
GKH
599};
600
9630fe88
TK
601/**
602 * binder_proc_lock() - Acquire outer lock for given binder_proc
603 * @proc: struct binder_proc to acquire
604 *
605 * Acquires proc->outer_lock. Used to protect binder_ref
606 * structures associated with the given proc.
607 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
616
617/**
618 * binder_proc_unlock() - Release spinlock for given binder_proc
619 * @proc: struct binder_proc to acquire
620 *
621 * Release lock acquired via binder_proc_lock()
622 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
631
632/**
633 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
634 * @proc: struct binder_proc to acquire
635 *
636 * Acquires proc->inner_lock. Used to protect todo lists
637 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
646
647/**
648 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
649 * @proc: struct binder_proc to acquire
650 *
651 * Release lock acquired via binder_inner_proc_lock()
652 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
661
662/**
663 * binder_node_lock() - Acquire spinlock for given binder_node
664 * @node: struct binder_node to acquire
665 *
666 * Acquires node->lock. Used to protect binder_node fields
667 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
676
677/**
 678 * binder_node_unlock() - Release spinlock for given binder_node
679 * @node: struct binder_node to acquire
680 *
681 * Release lock acquired via binder_node_lock()
682 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	/* @line is the caller's source line, reported for lock debugging */
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
691
355b0502
GKH
692static void
693binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
7a4408c6
TK
694static void binder_free_thread(struct binder_thread *thread);
695static void binder_free_proc(struct binder_proc *proc);
adc18842 696static void binder_inc_node_tmpref(struct binder_node *node);
355b0502 697
efde99cd 698static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
355b0502
GKH
699{
700 struct files_struct *files = proc->files;
355b0502
GKH
701 unsigned long rlim_cur;
702 unsigned long irqs;
703
704 if (files == NULL)
705 return -ESRCH;
706
dcfadfa4
AV
707 if (!lock_task_sighand(proc->tsk, &irqs))
708 return -EMFILE;
bf202361 709
dcfadfa4
AV
710 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
711 unlock_task_sighand(proc->tsk, &irqs);
355b0502 712
dcfadfa4 713 return __alloc_fd(files, 0, rlim_cur, flags);
355b0502
GKH
714}
715
716/*
717 * copied from fd_install
718 */
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	/* silently drop the install if the target's file table is gone */
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
725
726/*
727 * copied from sys_close
728 */
/*
 * copied from sys_close
 *
 * Close @fd in the target proc's file table. Returns 0 or a negative
 * errno (-ESRCH if the files_struct is already gone).
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
746
975a1ac9
AH
/* Acquire the (legacy) global binder mutex, with tracing around it. */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}
753
/* Release the (legacy) global binder mutex, with tracing. */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
759
355b0502
GKH
/*
 * Set the current task's nice value; when the requested value is not
 * permitted, clamp to the minimum nice allowed by RLIMIT_NICE and log.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	/* min_nice above MAX_NICE means the rlimit allows no lowering at all */
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
777
355b0502 778static struct binder_node *binder_get_node(struct binder_proc *proc,
da49889d 779 binder_uintptr_t ptr)
355b0502
GKH
780{
781 struct rb_node *n = proc->nodes.rb_node;
782 struct binder_node *node;
783
784 while (n) {
785 node = rb_entry(n, struct binder_node, rb_node);
786
787 if (ptr < node->ptr)
788 n = n->rb_left;
789 else if (ptr > node->ptr)
790 n = n->rb_right;
adc18842
TK
791 else {
792 /*
793 * take an implicit weak reference
794 * to ensure node stays alive until
795 * call to binder_put_node()
796 */
797 binder_inc_node_tmpref(node);
355b0502 798 return node;
adc18842 799 }
355b0502
GKH
800 }
801 return NULL;
802}
803
/*
 * Allocate a new binder_node for @ptr/@cookie and insert it into @proc's
 * nodes rbtree. Returns NULL on allocation failure or if a node with the
 * same @ptr already exists. The new node starts with one tmp_ref held
 * for the caller.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	/* find the insertion point, keyed by userspace ptr */
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	/* implicit reference held by the caller */
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
845
/* Free a node once it is unreferenced and off all lists (no locks held). */
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
851
852static int binder_inc_node_ilocked(struct binder_node *node, int strong,
853 int internal,
854 struct list_head *target_list)
855{
856 if (node->proc)
857 BUG_ON(!spin_is_locked(&node->proc->inner_lock));
355b0502
GKH
858 if (strong) {
859 if (internal) {
860 if (target_list == NULL &&
861 node->internal_strong_refs == 0 &&
342e5c90
MC
862 !(node->proc &&
863 node == node->proc->context->binder_context_mgr_node &&
864 node->has_strong_ref)) {
56b468fc
AS
865 pr_err("invalid inc strong node for %d\n",
866 node->debug_id);
355b0502
GKH
867 return -EINVAL;
868 }
869 node->internal_strong_refs++;
870 } else
871 node->local_strong_refs++;
872 if (!node->has_strong_ref && target_list) {
873 list_del_init(&node->work.entry);
874 list_add_tail(&node->work.entry, target_list);
875 }
876 } else {
877 if (!internal)
878 node->local_weak_refs++;
879 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
880 if (target_list == NULL) {
56b468fc
AS
881 pr_err("invalid inc weak node for %d\n",
882 node->debug_id);
355b0502
GKH
883 return -EINVAL;
884 }
885 list_add_tail(&node->work.entry, target_list);
886 }
887 }
888 return 0;
889}
890
ed29721e
TK
/*
 * Lock wrapper for binder_inc_node_ilocked(): takes the proc inner lock
 * when the node is still attached to a proc.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	if (node->proc)
		binder_inner_proc_lock(node->proc);
	ret = binder_inc_node_ilocked(node, strong, internal, target_list);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);

	return ret;
}
904
905static bool binder_dec_node_ilocked(struct binder_node *node,
906 int strong, int internal)
355b0502 907{
ed29721e
TK
908 struct binder_proc *proc = node->proc;
909
910 if (proc)
911 BUG_ON(!spin_is_locked(&proc->inner_lock));
355b0502
GKH
912 if (strong) {
913 if (internal)
914 node->internal_strong_refs--;
915 else
916 node->local_strong_refs--;
917 if (node->local_strong_refs || node->internal_strong_refs)
ed29721e 918 return false;
355b0502
GKH
919 } else {
920 if (!internal)
921 node->local_weak_refs--;
adc18842
TK
922 if (node->local_weak_refs || node->tmp_refs ||
923 !hlist_empty(&node->refs))
ed29721e 924 return false;
355b0502 925 }
ed29721e
TK
926
927 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
355b0502
GKH
928 if (list_empty(&node->work.entry)) {
929 list_add_tail(&node->work.entry, &node->proc->todo);
930 wake_up_interruptible(&node->proc->wait);
931 }
932 } else {
933 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
adc18842 934 !node->local_weak_refs && !node->tmp_refs) {
355b0502 935 list_del_init(&node->work.entry);
ed29721e 936 if (proc) {
355b0502
GKH
937 rb_erase(&node->rb_node, &node->proc->nodes);
938 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 939 "refless node %d deleted\n",
355b0502
GKH
940 node->debug_id);
941 } else {
c44b1231 942 spin_lock(&binder_dead_nodes_lock);
ed29721e
TK
943 /*
944 * tmp_refs could have changed so
945 * check it again
946 */
947 if (node->tmp_refs) {
948 spin_unlock(&binder_dead_nodes_lock);
949 return false;
950 }
355b0502 951 hlist_del(&node->dead_node);
c44b1231 952 spin_unlock(&binder_dead_nodes_lock);
355b0502 953 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 954 "dead node %d deleted\n",
355b0502
GKH
955 node->debug_id);
956 }
ed29721e 957 return true;
355b0502
GKH
958 }
959 }
ed29721e
TK
960 return false;
961}
355b0502 962
ed29721e
TK
963static void binder_dec_node(struct binder_node *node, int strong, int internal)
964{
965 bool free_node;
966
967 if (node->proc)
968 binder_inner_proc_lock(node->proc);
969 free_node = binder_dec_node_ilocked(node, strong, internal);
970 if (node->proc)
971 binder_inner_proc_unlock(node->proc);
972
973 if (free_node)
974 binder_free_node(node);
975}
976
977static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
978{
979 /*
980 * No call to binder_inc_node() is needed since we
981 * don't need to inform userspace of any changes to
982 * tmp_refs
983 */
984 node->tmp_refs++;
355b0502
GKH
985}
986
adc18842
TK
987/**
988 * binder_inc_node_tmpref() - take a temporary reference on node
989 * @node: node to reference
990 *
991 * Take reference on node to prevent the node from being freed
ed29721e
TK
992 * while referenced only by a local variable. The inner lock is
993 * needed to serialize with the node work on the queue (which
994 * isn't needed after the node is dead). If the node is dead
995 * (node->proc is NULL), use binder_dead_nodes_lock to protect
996 * node->tmp_refs against dead-node-only cases where the node
997 * lock cannot be acquired (eg traversing the dead node list to
998 * print nodes)
adc18842
TK
999 */
1000static void binder_inc_node_tmpref(struct binder_node *node)
1001{
ed29721e
TK
1002 if (node->proc)
1003 binder_inner_proc_lock(node->proc);
1004 else
1005 spin_lock(&binder_dead_nodes_lock);
1006 binder_inc_node_tmpref_ilocked(node);
1007 if (node->proc)
1008 binder_inner_proc_unlock(node->proc);
1009 else
1010 spin_unlock(&binder_dead_nodes_lock);
adc18842
TK
1011}
1012
1013/**
1014 * binder_dec_node_tmpref() - remove a temporary reference on node
1015 * @node: node to reference
1016 *
1017 * Release temporary reference on node taken via binder_inc_node_tmpref()
1018 */
1019static void binder_dec_node_tmpref(struct binder_node *node)
1020{
ed29721e
TK
1021 bool free_node;
1022
1023 if (node->proc)
1024 binder_inner_proc_lock(node->proc);
1025 else
1026 spin_lock(&binder_dead_nodes_lock);
adc18842
TK
1027 node->tmp_refs--;
1028 BUG_ON(node->tmp_refs < 0);
ed29721e
TK
1029 if (!node->proc)
1030 spin_unlock(&binder_dead_nodes_lock);
adc18842
TK
1031 /*
1032 * Call binder_dec_node() to check if all refcounts are 0
1033 * and cleanup is needed. Calling with strong=0 and internal=1
1034 * causes no actual reference to be released in binder_dec_node().
1035 * If that changes, a change is needed here too.
1036 */
ed29721e
TK
1037 free_node = binder_dec_node_ilocked(node, 0, 1);
1038 if (node->proc)
1039 binder_inner_proc_unlock(node->proc);
1040 if (free_node)
1041 binder_free_node(node);
adc18842
TK
1042}
1043
/* Release the implicit tmpref taken when the node was looked up. */
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
355b0502
GKH
1048
1049static struct binder_ref *binder_get_ref(struct binder_proc *proc,
0a3ffab9 1050 u32 desc, bool need_strong_ref)
355b0502
GKH
1051{
1052 struct rb_node *n = proc->refs_by_desc.rb_node;
1053 struct binder_ref *ref;
1054
1055 while (n) {
1056 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1057
372e3147 1058 if (desc < ref->data.desc) {
355b0502 1059 n = n->rb_left;
372e3147 1060 } else if (desc > ref->data.desc) {
355b0502 1061 n = n->rb_right;
372e3147 1062 } else if (need_strong_ref && !ref->data.strong) {
0a3ffab9
AH
1063 binder_user_error("tried to use weak ref as strong ref\n");
1064 return NULL;
1065 } else {
355b0502 1066 return ref;
0a3ffab9 1067 }
355b0502
GKH
1068 }
1069 return NULL;
1070}
1071
372e3147
TK
1072/**
1073 * binder_get_ref_for_node() - get the ref associated with given node
1074 * @proc: binder_proc that owns the ref
1075 * @node: binder_node of target
1076 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1077 *
1078 * Look up the ref for the given node and return it if it exists
1079 *
1080 * If it doesn't exist and the caller provides a newly allocated
1081 * ref, initialize the fields of the newly allocated ref and insert
1082 * into the given proc rb_trees and node refs list.
1083 *
1084 * Return: the ref for node. It is possible that another thread
1085 * allocated/initialized the ref first in which case the
1086 * returned ref would be different than the passed-in
1087 * new_ref. new_ref must be kfree'd by the caller in
1088 * this case.
1089 */
355b0502 1090static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
372e3147
TK
1091 struct binder_node *node,
1092 struct binder_ref *new_ref)
355b0502 1093{
372e3147 1094 struct binder_context *context = proc->context;
355b0502
GKH
1095 struct rb_node **p = &proc->refs_by_node.rb_node;
1096 struct rb_node *parent = NULL;
372e3147
TK
1097 struct binder_ref *ref;
1098 struct rb_node *n;
355b0502
GKH
1099
1100 while (*p) {
1101 parent = *p;
1102 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1103
1104 if (node < ref->node)
1105 p = &(*p)->rb_left;
1106 else if (node > ref->node)
1107 p = &(*p)->rb_right;
1108 else
1109 return ref;
1110 }
372e3147 1111 if (!new_ref)
355b0502 1112 return NULL;
372e3147 1113
355b0502 1114 binder_stats_created(BINDER_STAT_REF);
372e3147 1115 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
1116 new_ref->proc = proc;
1117 new_ref->node = node;
1118 rb_link_node(&new_ref->rb_node_node, parent, p);
1119 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1120
372e3147 1121 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
355b0502
GKH
1122 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1123 ref = rb_entry(n, struct binder_ref, rb_node_desc);
372e3147 1124 if (ref->data.desc > new_ref->data.desc)
355b0502 1125 break;
372e3147 1126 new_ref->data.desc = ref->data.desc + 1;
355b0502
GKH
1127 }
1128
1129 p = &proc->refs_by_desc.rb_node;
1130 while (*p) {
1131 parent = *p;
1132 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1133
372e3147 1134 if (new_ref->data.desc < ref->data.desc)
355b0502 1135 p = &(*p)->rb_left;
372e3147 1136 else if (new_ref->data.desc > ref->data.desc)
355b0502
GKH
1137 p = &(*p)->rb_right;
1138 else
1139 BUG();
1140 }
1141 rb_link_node(&new_ref->rb_node_desc, parent, p);
1142 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
e4cffcf4 1143 hlist_add_head(&new_ref->node_entry, &node->refs);
355b0502 1144
e4cffcf4
TK
1145 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1146 "%d new ref %d desc %d for node %d\n",
372e3147 1147 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
e4cffcf4 1148 node->debug_id);
355b0502
GKH
1149 return new_ref;
1150}
1151
372e3147 1152static void binder_cleanup_ref(struct binder_ref *ref)
355b0502 1153{
ed29721e
TK
1154 bool delete_node = false;
1155 struct binder_proc *node_proc = ref->node->proc;
1156
355b0502 1157 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 1158 "%d delete ref %d desc %d for node %d\n",
372e3147 1159 ref->proc->pid, ref->data.debug_id, ref->data.desc,
56b468fc 1160 ref->node->debug_id);
355b0502
GKH
1161
1162 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1163 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
372e3147 1164
ed29721e
TK
1165 if (node_proc)
1166 binder_inner_proc_lock(node_proc);
372e3147 1167 if (ref->data.strong)
ed29721e 1168 binder_dec_node_ilocked(ref->node, 1, 1);
372e3147 1169
355b0502 1170 hlist_del(&ref->node_entry);
ed29721e
TK
1171 delete_node = binder_dec_node_ilocked(ref->node, 0, 1);
1172 if (node_proc)
1173 binder_inner_proc_unlock(node_proc);
1174 /*
1175 * Clear ref->node unless we want the caller to free the node
1176 */
1177 if (!delete_node) {
1178 /*
1179 * The caller uses ref->node to determine
1180 * whether the node needs to be freed. Clear
1181 * it since the node is still alive.
1182 */
1183 ref->node = NULL;
1184 }
372e3147 1185
355b0502
GKH
1186 if (ref->death) {
1187 binder_debug(BINDER_DEBUG_DEAD_BINDER,
56b468fc 1188 "%d delete ref %d desc %d has death notification\n",
372e3147
TK
1189 ref->proc->pid, ref->data.debug_id,
1190 ref->data.desc);
355b0502 1191 list_del(&ref->death->work.entry);
355b0502
GKH
1192 binder_stats_deleted(BINDER_STAT_DEATH);
1193 }
355b0502
GKH
1194 binder_stats_deleted(BINDER_STAT_REF);
1195}
1196
372e3147
TK
1197/**
1198 * binder_inc_ref() - increment the ref for given handle
1199 * @ref: ref to be incremented
1200 * @strong: if true, strong increment, else weak
1201 * @target_list: list to queue node work on
1202 *
1203 * Increment the ref.
1204 *
1205 * Return: 0, if successful, else errno
1206 */
355b0502
GKH
1207static int binder_inc_ref(struct binder_ref *ref, int strong,
1208 struct list_head *target_list)
1209{
1210 int ret;
10f62861 1211
355b0502 1212 if (strong) {
372e3147 1213 if (ref->data.strong == 0) {
355b0502
GKH
1214 ret = binder_inc_node(ref->node, 1, 1, target_list);
1215 if (ret)
1216 return ret;
1217 }
372e3147 1218 ref->data.strong++;
355b0502 1219 } else {
372e3147 1220 if (ref->data.weak == 0) {
355b0502
GKH
1221 ret = binder_inc_node(ref->node, 0, 1, target_list);
1222 if (ret)
1223 return ret;
1224 }
372e3147 1225 ref->data.weak++;
355b0502
GKH
1226 }
1227 return 0;
1228}
1229
372e3147
TK
1230/**
1231 * binder_dec_ref() - dec the ref for given handle
1232 * @ref: ref to be decremented
1233 * @strong: if true, strong decrement, else weak
1234 *
1235 * Decrement the ref.
1236 *
1237 * TODO: kfree is avoided here since an upcoming patch
1238 * will put this under a lock.
1239 *
1240 * Return: true if ref is cleaned up and ready to be freed
1241 */
1242static bool binder_dec_ref(struct binder_ref *ref, int strong)
355b0502
GKH
1243{
1244 if (strong) {
372e3147 1245 if (ref->data.strong == 0) {
56b468fc 1246 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
372e3147
TK
1247 ref->proc->pid, ref->data.debug_id,
1248 ref->data.desc, ref->data.strong,
1249 ref->data.weak);
1250 return false;
355b0502 1251 }
372e3147 1252 ref->data.strong--;
ed29721e
TK
1253 if (ref->data.strong == 0)
1254 binder_dec_node(ref->node, strong, 1);
355b0502 1255 } else {
372e3147 1256 if (ref->data.weak == 0) {
56b468fc 1257 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
372e3147
TK
1258 ref->proc->pid, ref->data.debug_id,
1259 ref->data.desc, ref->data.strong,
1260 ref->data.weak);
1261 return false;
355b0502 1262 }
372e3147 1263 ref->data.weak--;
355b0502 1264 }
372e3147
TK
1265 if (ref->data.strong == 0 && ref->data.weak == 0) {
1266 binder_cleanup_ref(ref);
1267 /*
1268 * TODO: we could kfree(ref) here, but an upcoming
1269 * patch will call this with a lock held, so we
1270 * return an indication that the ref should be
1271 * freed.
1272 */
1273 return true;
1274 }
1275 return false;
1276}
1277
1278/**
1279 * binder_get_node_from_ref() - get the node from the given proc/desc
1280 * @proc: proc containing the ref
1281 * @desc: the handle associated with the ref
1282 * @need_strong_ref: if true, only return node if ref is strong
1283 * @rdata: the id/refcount data for the ref
1284 *
1285 * Given a proc and ref handle, return the associated binder_node
1286 *
1287 * Return: a binder_node or NULL if not found or not strong when strong required
1288 */
1289static struct binder_node *binder_get_node_from_ref(
1290 struct binder_proc *proc,
1291 u32 desc, bool need_strong_ref,
1292 struct binder_ref_data *rdata)
1293{
1294 struct binder_node *node;
1295 struct binder_ref *ref;
1296
1297 ref = binder_get_ref(proc, desc, need_strong_ref);
1298 if (!ref)
1299 goto err_no_ref;
1300 node = ref->node;
adc18842
TK
1301 /*
1302 * Take an implicit reference on the node to ensure
1303 * it stays alive until the call to binder_put_node()
1304 */
1305 binder_inc_node_tmpref(node);
372e3147
TK
1306 if (rdata)
1307 *rdata = ref->data;
1308
1309 return node;
1310
1311err_no_ref:
1312 return NULL;
1313}
1314
1315/**
1316 * binder_free_ref() - free the binder_ref
1317 * @ref: ref to free
1318 *
ed29721e
TK
1319 * Free the binder_ref. Free the binder_node indicated by ref->node
1320 * (if non-NULL) and the binder_ref_death indicated by ref->death.
372e3147
TK
1321 */
1322static void binder_free_ref(struct binder_ref *ref)
1323{
ed29721e
TK
1324 if (ref->node)
1325 binder_free_node(ref->node);
372e3147
TK
1326 kfree(ref->death);
1327 kfree(ref);
1328}
1329
1330/**
1331 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1332 * @proc: proc containing the ref
1333 * @desc: the handle associated with the ref
1334 * @increment: true=inc reference, false=dec reference
1335 * @strong: true=strong reference, false=weak reference
1336 * @rdata: the id/refcount data for the ref
1337 *
1338 * Given a proc and ref handle, increment or decrement the ref
1339 * according to "increment" arg.
1340 *
1341 * Return: 0 if successful, else errno
1342 */
1343static int binder_update_ref_for_handle(struct binder_proc *proc,
1344 uint32_t desc, bool increment, bool strong,
1345 struct binder_ref_data *rdata)
1346{
1347 int ret = 0;
1348 struct binder_ref *ref;
1349 bool delete_ref = false;
1350
1351 ref = binder_get_ref(proc, desc, strong);
1352 if (!ref) {
1353 ret = -EINVAL;
1354 goto err_no_ref;
1355 }
1356 if (increment)
1357 ret = binder_inc_ref(ref, strong, NULL);
1358 else
1359 delete_ref = binder_dec_ref(ref, strong);
1360
1361 if (rdata)
1362 *rdata = ref->data;
1363
1364 if (delete_ref)
1365 binder_free_ref(ref);
1366 return ret;
1367
1368err_no_ref:
1369 return ret;
1370}
1371
1372/**
1373 * binder_dec_ref_for_handle() - dec the ref for given handle
1374 * @proc: proc containing the ref
1375 * @desc: the handle associated with the ref
1376 * @strong: true=strong reference, false=weak reference
1377 * @rdata: the id/refcount data for the ref
1378 *
1379 * Just calls binder_update_ref_for_handle() to decrement the ref.
1380 *
1381 * Return: 0 if successful, else errno
1382 */
1383static int binder_dec_ref_for_handle(struct binder_proc *proc,
1384 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1385{
1386 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1387}
1388
1389
1390/**
1391 * binder_inc_ref_for_node() - increment the ref for given proc/node
1392 * @proc: proc containing the ref
1393 * @node: target node
1394 * @strong: true=strong reference, false=weak reference
1395 * @target_list: worklist to use if node is incremented
1396 * @rdata: the id/refcount data for the ref
1397 *
1398 * Given a proc and node, increment the ref. Create the ref if it
1399 * doesn't already exist
1400 *
1401 * Return: 0 if successful, else errno
1402 */
1403static int binder_inc_ref_for_node(struct binder_proc *proc,
1404 struct binder_node *node,
1405 bool strong,
1406 struct list_head *target_list,
1407 struct binder_ref_data *rdata)
1408{
1409 struct binder_ref *ref;
1410 struct binder_ref *new_ref = NULL;
1411 int ret = 0;
1412
1413 ref = binder_get_ref_for_node(proc, node, NULL);
1414 if (!ref) {
1415 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1416 if (!new_ref)
1417 return -ENOMEM;
1418 ref = binder_get_ref_for_node(proc, node, new_ref);
1419 }
1420 ret = binder_inc_ref(ref, strong, target_list);
1421 *rdata = ref->data;
1422 if (new_ref && ref != new_ref)
1423 /*
1424 * Another thread created the ref first so
1425 * free the one we allocated
1426 */
1427 kfree(new_ref);
1428 return ret;
355b0502
GKH
1429}
1430
1431static void binder_pop_transaction(struct binder_thread *target_thread,
1432 struct binder_transaction *t)
1433{
b6d282ce
TK
1434 BUG_ON(!target_thread);
1435 BUG_ON(target_thread->transaction_stack != t);
1436 BUG_ON(target_thread->transaction_stack->from != target_thread);
1437 target_thread->transaction_stack =
1438 target_thread->transaction_stack->from_parent;
1439 t->from = NULL;
1440}
1441
7a4408c6
TK
1442/**
1443 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1444 * @thread: thread to decrement
1445 *
1446 * A thread needs to be kept alive while being used to create or
1447 * handle a transaction. binder_get_txn_from() is used to safely
1448 * extract t->from from a binder_transaction and keep the thread
1449 * indicated by t->from from being freed. When done with that
1450 * binder_thread, this function is called to decrement the
1451 * tmp_ref and free if appropriate (thread has been released
1452 * and no transaction being processed by the driver)
1453 */
1454static void binder_thread_dec_tmpref(struct binder_thread *thread)
1455{
1456 /*
1457 * atomic is used to protect the counter value while
1458 * it cannot reach zero or thread->is_dead is false
1459 *
1460 * TODO: future patch adds locking to ensure that the
1461 * check of tmp_ref and is_dead is done with a lock held
1462 */
1463 atomic_dec(&thread->tmp_ref);
1464 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1465 binder_free_thread(thread);
1466 return;
1467 }
1468}
1469
1470/**
1471 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1472 * @proc: proc to decrement
1473 *
1474 * A binder_proc needs to be kept alive while being used to create or
1475 * handle a transaction. proc->tmp_ref is incremented when
1476 * creating a new transaction or the binder_proc is currently in-use
1477 * by threads that are being released. When done with the binder_proc,
1478 * this function is called to decrement the counter and free the
1479 * proc if appropriate (proc has been released, all threads have
1480 * been released and not currenly in-use to process a transaction).
1481 */
1482static void binder_proc_dec_tmpref(struct binder_proc *proc)
1483{
1484 proc->tmp_ref--;
1485 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1486 !proc->tmp_ref) {
1487 binder_free_proc(proc);
1488 return;
1489 }
1490}
1491
1492/**
1493 * binder_get_txn_from() - safely extract the "from" thread in transaction
1494 * @t: binder transaction for t->from
1495 *
1496 * Atomically return the "from" thread and increment the tmp_ref
1497 * count for the thread to ensure it stays alive until
1498 * binder_thread_dec_tmpref() is called.
1499 *
1500 * Return: the value of t->from
1501 */
1502static struct binder_thread *binder_get_txn_from(
1503 struct binder_transaction *t)
1504{
1505 struct binder_thread *from;
1506
1507 spin_lock(&t->lock);
1508 from = t->from;
1509 if (from)
1510 atomic_inc(&from->tmp_ref);
1511 spin_unlock(&t->lock);
1512 return from;
1513}
1514
b6d282ce
TK
1515static void binder_free_transaction(struct binder_transaction *t)
1516{
355b0502
GKH
1517 if (t->buffer)
1518 t->buffer->transaction = NULL;
1519 kfree(t);
1520 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1521}
1522
1523static void binder_send_failed_reply(struct binder_transaction *t,
1524 uint32_t error_code)
1525{
1526 struct binder_thread *target_thread;
d4ec15e1 1527 struct binder_transaction *next;
10f62861 1528
355b0502
GKH
1529 BUG_ON(t->flags & TF_ONE_WAY);
1530 while (1) {
7a4408c6 1531 target_thread = binder_get_txn_from(t);
355b0502 1532 if (target_thread) {
26549d17
TK
1533 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1534 "send failed reply for transaction %d to %d:%d\n",
1535 t->debug_id,
1536 target_thread->proc->pid,
1537 target_thread->pid);
1538
1539 binder_pop_transaction(target_thread, t);
1540 if (target_thread->reply_error.cmd == BR_OK) {
1541 target_thread->reply_error.cmd = error_code;
1542 list_add_tail(
1543 &target_thread->reply_error.work.entry,
1544 &target_thread->todo);
355b0502
GKH
1545 wake_up_interruptible(&target_thread->wait);
1546 } else {
26549d17
TK
1547 WARN(1, "Unexpected reply error: %u\n",
1548 target_thread->reply_error.cmd);
355b0502 1549 }
7a4408c6 1550 binder_thread_dec_tmpref(target_thread);
26549d17 1551 binder_free_transaction(t);
355b0502 1552 return;
d4ec15e1
LT
1553 }
1554 next = t->from_parent;
1555
1556 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1557 "send failed reply for transaction %d, target dead\n",
1558 t->debug_id);
1559
b6d282ce 1560 binder_free_transaction(t);
d4ec15e1 1561 if (next == NULL) {
355b0502 1562 binder_debug(BINDER_DEBUG_DEAD_BINDER,
d4ec15e1
LT
1563 "reply failed, no target thread at root\n");
1564 return;
355b0502 1565 }
d4ec15e1
LT
1566 t = next;
1567 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1568 "reply failed, no target thread -- retry %d\n",
1569 t->debug_id);
355b0502
GKH
1570 }
1571}
1572
feba3900
MC
1573/**
1574 * binder_validate_object() - checks for a valid metadata object in a buffer.
1575 * @buffer: binder_buffer that we're parsing.
1576 * @offset: offset in the buffer at which to validate an object.
1577 *
1578 * Return: If there's a valid metadata object at @offset in @buffer, the
1579 * size of that object. Otherwise, it returns zero.
1580 */
1581static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1582{
1583 /* Check if we can read a header first */
1584 struct binder_object_header *hdr;
1585 size_t object_size = 0;
1586
1587 if (offset > buffer->data_size - sizeof(*hdr) ||
1588 buffer->data_size < sizeof(*hdr) ||
1589 !IS_ALIGNED(offset, sizeof(u32)))
1590 return 0;
1591
1592 /* Ok, now see if we can read a complete object. */
1593 hdr = (struct binder_object_header *)(buffer->data + offset);
1594 switch (hdr->type) {
1595 case BINDER_TYPE_BINDER:
1596 case BINDER_TYPE_WEAK_BINDER:
1597 case BINDER_TYPE_HANDLE:
1598 case BINDER_TYPE_WEAK_HANDLE:
1599 object_size = sizeof(struct flat_binder_object);
1600 break;
1601 case BINDER_TYPE_FD:
1602 object_size = sizeof(struct binder_fd_object);
1603 break;
7980240b
MC
1604 case BINDER_TYPE_PTR:
1605 object_size = sizeof(struct binder_buffer_object);
1606 break;
def95c73
MC
1607 case BINDER_TYPE_FDA:
1608 object_size = sizeof(struct binder_fd_array_object);
1609 break;
feba3900
MC
1610 default:
1611 return 0;
1612 }
1613 if (offset <= buffer->data_size - object_size &&
1614 buffer->data_size >= object_size)
1615 return object_size;
1616 else
1617 return 0;
1618}
1619
7980240b
MC
1620/**
1621 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1622 * @b: binder_buffer containing the object
1623 * @index: index in offset array at which the binder_buffer_object is
1624 * located
1625 * @start: points to the start of the offset array
1626 * @num_valid: the number of valid offsets in the offset array
1627 *
1628 * Return: If @index is within the valid range of the offset array
1629 * described by @start and @num_valid, and if there's a valid
1630 * binder_buffer_object at the offset found in index @index
1631 * of the offset array, that object is returned. Otherwise,
1632 * %NULL is returned.
1633 * Note that the offset found in index @index itself is not
1634 * verified; this function assumes that @num_valid elements
1635 * from @start were previously verified to have valid offsets.
1636 */
1637static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1638 binder_size_t index,
1639 binder_size_t *start,
1640 binder_size_t num_valid)
1641{
1642 struct binder_buffer_object *buffer_obj;
1643 binder_size_t *offp;
1644
1645 if (index >= num_valid)
1646 return NULL;
1647
1648 offp = start + index;
1649 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1650 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1651 return NULL;
1652
1653 return buffer_obj;
1654}
1655
1656/**
1657 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1658 * @b: transaction buffer
1659 * @objects_start start of objects buffer
1660 * @buffer: binder_buffer_object in which to fix up
1661 * @offset: start offset in @buffer to fix up
1662 * @last_obj: last binder_buffer_object that we fixed up in
1663 * @last_min_offset: minimum fixup offset in @last_obj
1664 *
1665 * Return: %true if a fixup in buffer @buffer at offset @offset is
1666 * allowed.
1667 *
1668 * For safety reasons, we only allow fixups inside a buffer to happen
1669 * at increasing offsets; additionally, we only allow fixup on the last
1670 * buffer object that was verified, or one of its parents.
1671 *
1672 * Example of what is allowed:
1673 *
1674 * A
1675 * B (parent = A, offset = 0)
1676 * C (parent = A, offset = 16)
1677 * D (parent = C, offset = 0)
1678 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1679 *
1680 * Examples of what is not allowed:
1681 *
1682 * Decreasing offsets within the same parent:
1683 * A
1684 * C (parent = A, offset = 16)
1685 * B (parent = A, offset = 0) // decreasing offset within A
1686 *
1687 * Referring to a parent that wasn't the last object or any of its parents:
1688 * A
1689 * B (parent = A, offset = 0)
1690 * C (parent = A, offset = 0)
1691 * C (parent = A, offset = 16)
1692 * D (parent = B, offset = 0) // B is not A or any of A's parents
1693 */
1694static bool binder_validate_fixup(struct binder_buffer *b,
1695 binder_size_t *objects_start,
1696 struct binder_buffer_object *buffer,
1697 binder_size_t fixup_offset,
1698 struct binder_buffer_object *last_obj,
1699 binder_size_t last_min_offset)
1700{
1701 if (!last_obj) {
1702 /* Nothing to fix up in */
1703 return false;
1704 }
1705
1706 while (last_obj != buffer) {
1707 /*
1708 * Safe to retrieve the parent of last_obj, since it
1709 * was already previously verified by the driver.
1710 */
1711 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1712 return false;
1713 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1714 last_obj = (struct binder_buffer_object *)
1715 (b->data + *(objects_start + last_obj->parent));
1716 }
1717 return (fixup_offset >= last_min_offset);
1718}
1719
355b0502
GKH
1720static void binder_transaction_buffer_release(struct binder_proc *proc,
1721 struct binder_buffer *buffer,
da49889d 1722 binder_size_t *failed_at)
355b0502 1723{
7980240b 1724 binder_size_t *offp, *off_start, *off_end;
355b0502
GKH
1725 int debug_id = buffer->debug_id;
1726
1727 binder_debug(BINDER_DEBUG_TRANSACTION,
56b468fc 1728 "%d buffer release %d, size %zd-%zd, failed at %p\n",
355b0502
GKH
1729 proc->pid, buffer->debug_id,
1730 buffer->data_size, buffer->offsets_size, failed_at);
1731
1732 if (buffer->target_node)
1733 binder_dec_node(buffer->target_node, 1, 0);
1734
7980240b
MC
1735 off_start = (binder_size_t *)(buffer->data +
1736 ALIGN(buffer->data_size, sizeof(void *)));
355b0502
GKH
1737 if (failed_at)
1738 off_end = failed_at;
1739 else
7980240b
MC
1740 off_end = (void *)off_start + buffer->offsets_size;
1741 for (offp = off_start; offp < off_end; offp++) {
feba3900
MC
1742 struct binder_object_header *hdr;
1743 size_t object_size = binder_validate_object(buffer, *offp);
10f62861 1744
feba3900
MC
1745 if (object_size == 0) {
1746 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
da49889d 1747 debug_id, (u64)*offp, buffer->data_size);
355b0502
GKH
1748 continue;
1749 }
feba3900
MC
1750 hdr = (struct binder_object_header *)(buffer->data + *offp);
1751 switch (hdr->type) {
355b0502
GKH
1752 case BINDER_TYPE_BINDER:
1753 case BINDER_TYPE_WEAK_BINDER: {
feba3900
MC
1754 struct flat_binder_object *fp;
1755 struct binder_node *node;
10f62861 1756
feba3900
MC
1757 fp = to_flat_binder_object(hdr);
1758 node = binder_get_node(proc, fp->binder);
355b0502 1759 if (node == NULL) {
da49889d
AH
1760 pr_err("transaction release %d bad node %016llx\n",
1761 debug_id, (u64)fp->binder);
355b0502
GKH
1762 break;
1763 }
1764 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d
AH
1765 " node %d u%016llx\n",
1766 node->debug_id, (u64)node->ptr);
feba3900
MC
1767 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1768 0);
adc18842 1769 binder_put_node(node);
355b0502
GKH
1770 } break;
1771 case BINDER_TYPE_HANDLE:
1772 case BINDER_TYPE_WEAK_HANDLE: {
feba3900 1773 struct flat_binder_object *fp;
372e3147
TK
1774 struct binder_ref_data rdata;
1775 int ret;
0a3ffab9 1776
feba3900 1777 fp = to_flat_binder_object(hdr);
372e3147
TK
1778 ret = binder_dec_ref_for_handle(proc, fp->handle,
1779 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1780
1781 if (ret) {
1782 pr_err("transaction release %d bad handle %d, ret = %d\n",
1783 debug_id, fp->handle, ret);
355b0502
GKH
1784 break;
1785 }
1786 binder_debug(BINDER_DEBUG_TRANSACTION,
372e3147
TK
1787 " ref %d desc %d\n",
1788 rdata.debug_id, rdata.desc);
355b0502
GKH
1789 } break;
1790
feba3900
MC
1791 case BINDER_TYPE_FD: {
1792 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1793
355b0502 1794 binder_debug(BINDER_DEBUG_TRANSACTION,
feba3900 1795 " fd %d\n", fp->fd);
355b0502 1796 if (failed_at)
feba3900
MC
1797 task_close_fd(proc, fp->fd);
1798 } break;
7980240b
MC
1799 case BINDER_TYPE_PTR:
1800 /*
1801 * Nothing to do here, this will get cleaned up when the
1802 * transaction buffer gets freed
1803 */
1804 break;
def95c73
MC
1805 case BINDER_TYPE_FDA: {
1806 struct binder_fd_array_object *fda;
1807 struct binder_buffer_object *parent;
1808 uintptr_t parent_buffer;
1809 u32 *fd_array;
1810 size_t fd_index;
1811 binder_size_t fd_buf_size;
1812
1813 fda = to_binder_fd_array_object(hdr);
1814 parent = binder_validate_ptr(buffer, fda->parent,
1815 off_start,
1816 offp - off_start);
1817 if (!parent) {
1818 pr_err("transaction release %d bad parent offset",
1819 debug_id);
1820 continue;
1821 }
1822 /*
1823 * Since the parent was already fixed up, convert it
1824 * back to kernel address space to access it
1825 */
1826 parent_buffer = parent->buffer -
19c98724
TK
1827 binder_alloc_get_user_buffer_offset(
1828 &proc->alloc);
def95c73
MC
1829
1830 fd_buf_size = sizeof(u32) * fda->num_fds;
1831 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1832 pr_err("transaction release %d invalid number of fds (%lld)\n",
1833 debug_id, (u64)fda->num_fds);
1834 continue;
1835 }
1836 if (fd_buf_size > parent->length ||
1837 fda->parent_offset > parent->length - fd_buf_size) {
1838 /* No space for all file descriptors here. */
1839 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1840 debug_id, (u64)fda->num_fds);
1841 continue;
1842 }
1843 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1844 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1845 task_close_fd(proc, fd_array[fd_index]);
1846 } break;
355b0502 1847 default:
64dcfe6b 1848 pr_err("transaction release %d bad object type %x\n",
feba3900 1849 debug_id, hdr->type);
355b0502
GKH
1850 break;
1851 }
1852 }
1853}
1854
a056af42
MC
/**
 * binder_translate_binder() - translate a local binder object for the target
 * @fp:     flat_binder_object being sent (BINDER_TYPE_BINDER or
 *          BINDER_TYPE_WEAK_BINDER); rewritten in place into a handle
 * @t:      transaction the object travels in (supplies the target proc)
 * @thread: sending thread
 *
 * Looks up (or creates) the binder_node that @fp describes in the sending
 * process, takes a reference on it in the target process, and rewrites @fp
 * from a BINDER/WEAK_BINDER object into the corresponding HANDLE/WEAK_HANDLE
 * valid in the target.
 *
 * Return: 0 on success, -ENOMEM if node allocation fails, -EINVAL on a
 * cookie mismatch, -EPERM if the security hook rejects the transfer, or the
 * error from binder_inc_ref_for_node().
 */
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	/* First send of this object creates the node lazily. */
	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	/* The (binder, cookie) pair must be stable across sends. */
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	/*
	 * Take a strong ref for BINDER, weak for WEAK_BINDER, in the
	 * *target* process; ref-count work is queued on the sender's todo.
	 */
	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	/* Rewrite the object in place: the target sees a handle, not a node. */
	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	/* Drop the temporary node ref taken by binder_get_node/new_node. */
	binder_put_node(node);
	return ret;
}
1910
1911static int binder_translate_handle(struct flat_binder_object *fp,
1912 struct binder_transaction *t,
1913 struct binder_thread *thread)
1914{
a056af42
MC
1915 struct binder_proc *proc = thread->proc;
1916 struct binder_proc *target_proc = t->to_proc;
372e3147
TK
1917 struct binder_node *node;
1918 struct binder_ref_data src_rdata;
adc18842 1919 int ret = 0;
a056af42 1920
372e3147
TK
1921 node = binder_get_node_from_ref(proc, fp->handle,
1922 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
1923 if (!node) {
a056af42
MC
1924 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1925 proc->pid, thread->pid, fp->handle);
1926 return -EINVAL;
1927 }
adc18842
TK
1928 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
1929 ret = -EPERM;
1930 goto done;
1931 }
a056af42 1932
372e3147 1933 if (node->proc == target_proc) {
a056af42
MC
1934 if (fp->hdr.type == BINDER_TYPE_HANDLE)
1935 fp->hdr.type = BINDER_TYPE_BINDER;
1936 else
1937 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
372e3147
TK
1938 fp->binder = node->ptr;
1939 fp->cookie = node->cookie;
1940 binder_inc_node(node,
1941 fp->hdr.type == BINDER_TYPE_BINDER,
a056af42 1942 0, NULL);
372e3147 1943 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
a056af42
MC
1944 binder_debug(BINDER_DEBUG_TRANSACTION,
1945 " ref %d desc %d -> node %d u%016llx\n",
372e3147
TK
1946 src_rdata.debug_id, src_rdata.desc, node->debug_id,
1947 (u64)node->ptr);
a056af42 1948 } else {
372e3147
TK
1949 int ret;
1950 struct binder_ref_data dest_rdata;
a056af42 1951
372e3147
TK
1952 ret = binder_inc_ref_for_node(target_proc, node,
1953 fp->hdr.type == BINDER_TYPE_HANDLE,
1954 NULL, &dest_rdata);
1955 if (ret)
adc18842 1956 goto done;
a056af42
MC
1957
1958 fp->binder = 0;
372e3147 1959 fp->handle = dest_rdata.desc;
a056af42 1960 fp->cookie = 0;
372e3147
TK
1961 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
1962 &dest_rdata);
a056af42
MC
1963 binder_debug(BINDER_DEBUG_TRANSACTION,
1964 " ref %d desc %d -> ref %d desc %d (node %d)\n",
372e3147
TK
1965 src_rdata.debug_id, src_rdata.desc,
1966 dest_rdata.debug_id, dest_rdata.desc,
1967 node->debug_id);
a056af42 1968 }
adc18842
TK
1969done:
1970 binder_put_node(node);
1971 return ret;
a056af42
MC
1972}
1973
1974static int binder_translate_fd(int fd,
1975 struct binder_transaction *t,
1976 struct binder_thread *thread,
1977 struct binder_transaction *in_reply_to)
1978{
1979 struct binder_proc *proc = thread->proc;
1980 struct binder_proc *target_proc = t->to_proc;
1981 int target_fd;
1982 struct file *file;
1983 int ret;
1984 bool target_allows_fd;
1985
1986 if (in_reply_to)
1987 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1988 else
1989 target_allows_fd = t->buffer->target_node->accept_fds;
1990 if (!target_allows_fd) {
1991 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1992 proc->pid, thread->pid,
1993 in_reply_to ? "reply" : "transaction",
1994 fd);
1995 ret = -EPERM;
1996 goto err_fd_not_accepted;
1997 }
1998
1999 file = fget(fd);
2000 if (!file) {
2001 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2002 proc->pid, thread->pid, fd);
2003 ret = -EBADF;
2004 goto err_fget;
2005 }
2006 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2007 if (ret < 0) {
2008 ret = -EPERM;
2009 goto err_security;
2010 }
2011
2012 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2013 if (target_fd < 0) {
2014 ret = -ENOMEM;
2015 goto err_get_unused_fd;
2016 }
2017 task_fd_install(target_proc, target_fd, file);
2018 trace_binder_transaction_fd(t, fd, target_fd);
2019 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2020 fd, target_fd);
2021
2022 return target_fd;
2023
2024err_get_unused_fd:
2025err_security:
2026 fput(file);
2027err_fget:
2028err_fd_not_accepted:
2029 return ret;
2030}
2031
def95c73
MC
2032static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2033 struct binder_buffer_object *parent,
2034 struct binder_transaction *t,
2035 struct binder_thread *thread,
2036 struct binder_transaction *in_reply_to)
2037{
2038 binder_size_t fdi, fd_buf_size, num_installed_fds;
2039 int target_fd;
2040 uintptr_t parent_buffer;
2041 u32 *fd_array;
2042 struct binder_proc *proc = thread->proc;
2043 struct binder_proc *target_proc = t->to_proc;
2044
2045 fd_buf_size = sizeof(u32) * fda->num_fds;
2046 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2047 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2048 proc->pid, thread->pid, (u64)fda->num_fds);
2049 return -EINVAL;
2050 }
2051 if (fd_buf_size > parent->length ||
2052 fda->parent_offset > parent->length - fd_buf_size) {
2053 /* No space for all file descriptors here. */
2054 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2055 proc->pid, thread->pid, (u64)fda->num_fds);
2056 return -EINVAL;
2057 }
2058 /*
2059 * Since the parent was already fixed up, convert it
2060 * back to the kernel address space to access it
2061 */
19c98724
TK
2062 parent_buffer = parent->buffer -
2063 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
def95c73
MC
2064 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2065 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2066 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2067 proc->pid, thread->pid);
2068 return -EINVAL;
2069 }
2070 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2071 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2072 in_reply_to);
2073 if (target_fd < 0)
2074 goto err_translate_fd_failed;
2075 fd_array[fdi] = target_fd;
2076 }
2077 return 0;
2078
2079err_translate_fd_failed:
2080 /*
2081 * Failed to allocate fd or security error, free fds
2082 * installed so far.
2083 */
2084 num_installed_fds = fdi;
2085 for (fdi = 0; fdi < num_installed_fds; fdi++)
2086 task_close_fd(target_proc, fd_array[fdi]);
2087 return target_fd;
2088}
2089
7980240b
MC
2090static int binder_fixup_parent(struct binder_transaction *t,
2091 struct binder_thread *thread,
2092 struct binder_buffer_object *bp,
2093 binder_size_t *off_start,
2094 binder_size_t num_valid,
2095 struct binder_buffer_object *last_fixup_obj,
2096 binder_size_t last_fixup_min_off)
2097{
2098 struct binder_buffer_object *parent;
2099 u8 *parent_buffer;
2100 struct binder_buffer *b = t->buffer;
2101 struct binder_proc *proc = thread->proc;
2102 struct binder_proc *target_proc = t->to_proc;
2103
2104 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2105 return 0;
2106
2107 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2108 if (!parent) {
2109 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2110 proc->pid, thread->pid);
2111 return -EINVAL;
2112 }
2113
2114 if (!binder_validate_fixup(b, off_start,
2115 parent, bp->parent_offset,
2116 last_fixup_obj,
2117 last_fixup_min_off)) {
2118 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2119 proc->pid, thread->pid);
2120 return -EINVAL;
2121 }
2122
2123 if (parent->length < sizeof(binder_uintptr_t) ||
2124 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2125 /* No space for a pointer here! */
2126 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2127 proc->pid, thread->pid);
2128 return -EINVAL;
2129 }
2130 parent_buffer = (u8 *)(parent->buffer -
19c98724
TK
2131 binder_alloc_get_user_buffer_offset(
2132 &target_proc->alloc));
7980240b
MC
2133 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2134
2135 return 0;
2136}
2137
355b0502
GKH
/**
 * binder_transaction() - process a BC_TRANSACTION or BC_REPLY from userspace
 * @proc:               sending process
 * @thread:             sending thread
 * @tr:                 the transaction data copied from userspace
 * @reply:              nonzero if this is a BC_REPLY
 * @extra_buffers_size: size of extra scatter-gather buffer space (PTR objects)
 *
 * Resolves the target (reply stack, handle, or context manager), allocates
 * a transaction and its buffer in the target, copies the payload in, walks
 * and translates every embedded object, and queues the work on the target's
 * todo list. On any failure it unwinds via the layered goto ladder, records
 * the error in the failed-transaction log, and queues a return error on the
 * sending thread. Errors are never returned directly — this function is void.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	/* Record the attempt in the global transaction log up front. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		/*
		 * A reply must match the transaction at the top of this
		 * thread's transaction stack.
		 */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		/* Takes a tmp ref on the originating thread, if still alive. */
		target_thread = binder_get_txn_from(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		/* Keep target_proc alive until the end of this function. */
		target_proc->tmp_ref++;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref) {
				binder_inc_node(ref->node, 1, 0, NULL);
				target_node = ref->node;
			}
			if (target_node == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		} else {
			/* Handle 0 addresses the context manager node. */
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			binder_inc_node(target_node, 1, 0, NULL);
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		target_proc->tmp_ref++;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			/*
			 * For a synchronous call, walk the call stack: if the
			 * target process already has a thread blocked on us,
			 * deliver to that thread to avoid deadlock.
			 */
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
	}
	/* Deliver to a specific thread when known, else to the process. */
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	/* Only synchronous calls record the sender for the reply path. */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/* Allocate payload + offsets + scatter-gather space in the target. */
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	/* The offsets array lives right after the (aligned) data payload. */
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	/*
	 * Walk every object offset in ascending order and translate each
	 * embedded object for the target process. off_min enforces that
	 * offsets are strictly increasing and objects do not overlap.
	 */
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Queue BR_TRANSACTION_COMPLETE for the sender. */
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);

	if (reply) {
		if (target_thread->is_dead)
			goto err_dead_proc_or_thread;
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous call: push onto the sender's call stack. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		if (target_proc->is_dead ||
				(target_thread && target_thread->is_dead)) {
			binder_pop_transaction(thread, t);
			goto err_dead_proc_or_thread;
		}
	} else {
		/*
		 * Async: serialize behind any in-flight async transaction
		 * on the node; no waker in that case.
		 */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
		if (target_proc->is_dead ||
				(target_thread && target_thread->is_dead))
			goto err_dead_proc_or_thread;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

	/*
	 * Error ladder: each label undoes everything acquired before the
	 * failure point, in reverse order of acquisition.
	 */
err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/* Releases refs taken for objects translated before the failure. */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		/* Mirror the entry into the failed-transaction log. */
		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	/* Queue the error for the sender; replies also fail the caller. */
	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		list_add_tail(&thread->return_error.work.entry,
			      &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		list_add_tail(&thread->return_error.work.entry,
			      &thread->todo);
	}
}
2697
fb07ebc3
BP
2698static int binder_thread_write(struct binder_proc *proc,
2699 struct binder_thread *thread,
da49889d
AH
2700 binder_uintptr_t binder_buffer, size_t size,
2701 binder_size_t *consumed)
355b0502
GKH
2702{
2703 uint32_t cmd;
342e5c90 2704 struct binder_context *context = proc->context;
da49889d 2705 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
2706 void __user *ptr = buffer + *consumed;
2707 void __user *end = buffer + size;
2708
26549d17 2709 while (ptr < end && thread->return_error.cmd == BR_OK) {
372e3147
TK
2710 int ret;
2711
355b0502
GKH
2712 if (get_user(cmd, (uint32_t __user *)ptr))
2713 return -EFAULT;
2714 ptr += sizeof(uint32_t);
975a1ac9 2715 trace_binder_command(cmd);
355b0502 2716 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
0953c797
BJS
2717 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2718 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2719 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
355b0502
GKH
2720 }
2721 switch (cmd) {
2722 case BC_INCREFS:
2723 case BC_ACQUIRE:
2724 case BC_RELEASE:
2725 case BC_DECREFS: {
2726 uint32_t target;
355b0502 2727 const char *debug_string;
372e3147
TK
2728 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
2729 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
2730 struct binder_ref_data rdata;
355b0502
GKH
2731
2732 if (get_user(target, (uint32_t __user *)ptr))
2733 return -EFAULT;
c44b1231 2734
355b0502 2735 ptr += sizeof(uint32_t);
372e3147
TK
2736 ret = -1;
2737 if (increment && !target) {
c44b1231 2738 struct binder_node *ctx_mgr_node;
c44b1231
TK
2739 mutex_lock(&context->context_mgr_node_lock);
2740 ctx_mgr_node = context->binder_context_mgr_node;
372e3147
TK
2741 if (ctx_mgr_node)
2742 ret = binder_inc_ref_for_node(
2743 proc, ctx_mgr_node,
2744 strong, NULL, &rdata);
c44b1231
TK
2745 mutex_unlock(&context->context_mgr_node_lock);
2746 }
372e3147
TK
2747 if (ret)
2748 ret = binder_update_ref_for_handle(
2749 proc, target, increment, strong,
2750 &rdata);
2751 if (!ret && rdata.desc != target) {
2752 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
2753 proc->pid, thread->pid,
2754 target, rdata.desc);
355b0502
GKH
2755 }
2756 switch (cmd) {
2757 case BC_INCREFS:
2758 debug_string = "IncRefs";
355b0502
GKH
2759 break;
2760 case BC_ACQUIRE:
2761 debug_string = "Acquire";
355b0502
GKH
2762 break;
2763 case BC_RELEASE:
2764 debug_string = "Release";
355b0502
GKH
2765 break;
2766 case BC_DECREFS:
2767 default:
2768 debug_string = "DecRefs";
372e3147
TK
2769 break;
2770 }
2771 if (ret) {
2772 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
2773 proc->pid, thread->pid, debug_string,
2774 strong, target, ret);
355b0502
GKH
2775 break;
2776 }
2777 binder_debug(BINDER_DEBUG_USER_REFS,
372e3147
TK
2778 "%d:%d %s ref %d desc %d s %d w %d\n",
2779 proc->pid, thread->pid, debug_string,
2780 rdata.debug_id, rdata.desc, rdata.strong,
2781 rdata.weak);
355b0502
GKH
2782 break;
2783 }
2784 case BC_INCREFS_DONE:
2785 case BC_ACQUIRE_DONE: {
da49889d
AH
2786 binder_uintptr_t node_ptr;
2787 binder_uintptr_t cookie;
355b0502
GKH
2788 struct binder_node *node;
2789
da49889d 2790 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2791 return -EFAULT;
da49889d
AH
2792 ptr += sizeof(binder_uintptr_t);
2793 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2794 return -EFAULT;
da49889d 2795 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
2796 node = binder_get_node(proc, node_ptr);
2797 if (node == NULL) {
da49889d 2798 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
2799 proc->pid, thread->pid,
2800 cmd == BC_INCREFS_DONE ?
2801 "BC_INCREFS_DONE" :
2802 "BC_ACQUIRE_DONE",
da49889d 2803 (u64)node_ptr);
355b0502
GKH
2804 break;
2805 }
2806 if (cookie != node->cookie) {
da49889d 2807 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
2808 proc->pid, thread->pid,
2809 cmd == BC_INCREFS_DONE ?
2810 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
2811 (u64)node_ptr, node->debug_id,
2812 (u64)cookie, (u64)node->cookie);
adc18842 2813 binder_put_node(node);
355b0502
GKH
2814 break;
2815 }
ed29721e 2816 binder_inner_proc_lock(proc);
355b0502
GKH
2817 if (cmd == BC_ACQUIRE_DONE) {
2818 if (node->pending_strong_ref == 0) {
56b468fc 2819 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
2820 proc->pid, thread->pid,
2821 node->debug_id);
ed29721e 2822 binder_inner_proc_unlock(proc);
adc18842 2823 binder_put_node(node);
355b0502
GKH
2824 break;
2825 }
2826 node->pending_strong_ref = 0;
2827 } else {
2828 if (node->pending_weak_ref == 0) {
56b468fc 2829 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
2830 proc->pid, thread->pid,
2831 node->debug_id);
ed29721e 2832 binder_inner_proc_unlock(proc);
adc18842 2833 binder_put_node(node);
355b0502
GKH
2834 break;
2835 }
2836 node->pending_weak_ref = 0;
2837 }
ed29721e 2838 binder_inner_proc_unlock(proc);
355b0502
GKH
2839 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2840 binder_debug(BINDER_DEBUG_USER_REFS,
adc18842 2841 "%d:%d %s node %d ls %d lw %d tr %d\n",
355b0502
GKH
2842 proc->pid, thread->pid,
2843 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
adc18842
TK
2844 node->debug_id, node->local_strong_refs,
2845 node->local_weak_refs, node->tmp_refs);
2846 binder_put_node(node);
355b0502
GKH
2847 break;
2848 }
2849 case BC_ATTEMPT_ACQUIRE:
56b468fc 2850 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
2851 return -EINVAL;
2852 case BC_ACQUIRE_RESULT:
56b468fc 2853 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
2854 return -EINVAL;
2855
2856 case BC_FREE_BUFFER: {
da49889d 2857 binder_uintptr_t data_ptr;
355b0502
GKH
2858 struct binder_buffer *buffer;
2859
da49889d 2860 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2861 return -EFAULT;
da49889d 2862 ptr += sizeof(binder_uintptr_t);
355b0502 2863
53d311cf
TK
2864 buffer = binder_alloc_prepare_to_free(&proc->alloc,
2865 data_ptr);
355b0502 2866 if (buffer == NULL) {
da49889d
AH
2867 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2868 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2869 break;
2870 }
2871 if (!buffer->allow_user_free) {
da49889d
AH
2872 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2873 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2874 break;
2875 }
2876 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
2877 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2878 proc->pid, thread->pid, (u64)data_ptr,
2879 buffer->debug_id,
355b0502
GKH
2880 buffer->transaction ? "active" : "finished");
2881
2882 if (buffer->transaction) {
2883 buffer->transaction->buffer = NULL;
2884 buffer->transaction = NULL;
2885 }
2886 if (buffer->async_transaction && buffer->target_node) {
2887 BUG_ON(!buffer->target_node->has_async_transaction);
2888 if (list_empty(&buffer->target_node->async_todo))
2889 buffer->target_node->has_async_transaction = 0;
2890 else
2891 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2892 }
975a1ac9 2893 trace_binder_transaction_buffer_release(buffer);
355b0502 2894 binder_transaction_buffer_release(proc, buffer, NULL);
19c98724 2895 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
2896 break;
2897 }
2898
7980240b
MC
2899 case BC_TRANSACTION_SG:
2900 case BC_REPLY_SG: {
2901 struct binder_transaction_data_sg tr;
2902
2903 if (copy_from_user(&tr, ptr, sizeof(tr)))
2904 return -EFAULT;
2905 ptr += sizeof(tr);
2906 binder_transaction(proc, thread, &tr.transaction_data,
2907 cmd == BC_REPLY_SG, tr.buffers_size);
2908 break;
2909 }
355b0502
GKH
2910 case BC_TRANSACTION:
2911 case BC_REPLY: {
2912 struct binder_transaction_data tr;
2913
2914 if (copy_from_user(&tr, ptr, sizeof(tr)))
2915 return -EFAULT;
2916 ptr += sizeof(tr);
4bfac80a
MC
2917 binder_transaction(proc, thread, &tr,
2918 cmd == BC_REPLY, 0);
355b0502
GKH
2919 break;
2920 }
2921
2922 case BC_REGISTER_LOOPER:
2923 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2924 "%d:%d BC_REGISTER_LOOPER\n",
355b0502
GKH
2925 proc->pid, thread->pid);
2926 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2927 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2928 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
2929 proc->pid, thread->pid);
2930 } else if (proc->requested_threads == 0) {
2931 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2932 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
2933 proc->pid, thread->pid);
2934 } else {
2935 proc->requested_threads--;
2936 proc->requested_threads_started++;
2937 }
2938 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2939 break;
2940 case BC_ENTER_LOOPER:
2941 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2942 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
2943 proc->pid, thread->pid);
2944 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2945 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2946 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
2947 proc->pid, thread->pid);
2948 }
2949 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2950 break;
2951 case BC_EXIT_LOOPER:
2952 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2953 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
2954 proc->pid, thread->pid);
2955 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2956 break;
2957
2958 case BC_REQUEST_DEATH_NOTIFICATION:
2959 case BC_CLEAR_DEATH_NOTIFICATION: {
2960 uint32_t target;
da49889d 2961 binder_uintptr_t cookie;
355b0502
GKH
2962 struct binder_ref *ref;
2963 struct binder_ref_death *death;
2964
2965 if (get_user(target, (uint32_t __user *)ptr))
2966 return -EFAULT;
2967 ptr += sizeof(uint32_t);
da49889d 2968 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2969 return -EFAULT;
da49889d 2970 ptr += sizeof(binder_uintptr_t);
0a3ffab9 2971 ref = binder_get_ref(proc, target, false);
355b0502 2972 if (ref == NULL) {
56b468fc 2973 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
2974 proc->pid, thread->pid,
2975 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2976 "BC_REQUEST_DEATH_NOTIFICATION" :
2977 "BC_CLEAR_DEATH_NOTIFICATION",
2978 target);
2979 break;
2980 }
2981
2982 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2983 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
2984 proc->pid, thread->pid,
2985 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2986 "BC_REQUEST_DEATH_NOTIFICATION" :
2987 "BC_CLEAR_DEATH_NOTIFICATION",
372e3147
TK
2988 (u64)cookie, ref->data.debug_id,
2989 ref->data.desc, ref->data.strong,
2990 ref->data.weak, ref->node->debug_id);
355b0502
GKH
2991
2992 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2993 if (ref->death) {
56b468fc 2994 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502
GKH
2995 proc->pid, thread->pid);
2996 break;
2997 }
2998 death = kzalloc(sizeof(*death), GFP_KERNEL);
2999 if (death == NULL) {
26549d17
TK
3000 WARN_ON(thread->return_error.cmd !=
3001 BR_OK);
3002 thread->return_error.cmd = BR_ERROR;
3003 list_add_tail(
3004 &thread->return_error.work.entry,
3005 &thread->todo);
355b0502 3006 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
56b468fc 3007 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
355b0502
GKH
3008 proc->pid, thread->pid);
3009 break;
3010 }
3011 binder_stats_created(BINDER_STAT_DEATH);
3012 INIT_LIST_HEAD(&death->work.entry);
3013 death->cookie = cookie;
3014 ref->death = death;
3015 if (ref->node->proc == NULL) {
3016 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3017 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
3018 list_add_tail(&ref->death->work.entry, &thread->todo);
3019 } else {
3020 list_add_tail(&ref->death->work.entry, &proc->todo);
3021 wake_up_interruptible(&proc->wait);
3022 }
3023 }
3024 } else {
3025 if (ref->death == NULL) {
56b468fc 3026 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502
GKH
3027 proc->pid, thread->pid);
3028 break;
3029 }
3030 death = ref->death;
3031 if (death->cookie != cookie) {
da49889d 3032 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 3033 proc->pid, thread->pid,
da49889d
AH
3034 (u64)death->cookie,
3035 (u64)cookie);
355b0502
GKH
3036 break;
3037 }
3038 ref->death = NULL;
3039 if (list_empty(&death->work.entry)) {
3040 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3041 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
3042 list_add_tail(&death->work.entry, &thread->todo);
3043 } else {
3044 list_add_tail(&death->work.entry, &proc->todo);
3045 wake_up_interruptible(&proc->wait);
3046 }
3047 } else {
3048 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3049 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3050 }
3051 }
3052 } break;
3053 case BC_DEAD_BINDER_DONE: {
3054 struct binder_work *w;
da49889d 3055 binder_uintptr_t cookie;
355b0502 3056 struct binder_ref_death *death = NULL;
10f62861 3057
da49889d 3058 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
3059 return -EFAULT;
3060
7a64cd88 3061 ptr += sizeof(cookie);
355b0502
GKH
3062 list_for_each_entry(w, &proc->delivered_death, entry) {
3063 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
10f62861 3064
355b0502
GKH
3065 if (tmp_death->cookie == cookie) {
3066 death = tmp_death;
3067 break;
3068 }
3069 }
3070 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
3071 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3072 proc->pid, thread->pid, (u64)cookie,
3073 death);
355b0502 3074 if (death == NULL) {
da49889d
AH
3075 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3076 proc->pid, thread->pid, (u64)cookie);
355b0502
GKH
3077 break;
3078 }
3079
3080 list_del_init(&death->work.entry);
3081 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3082 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3083 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
3084 list_add_tail(&death->work.entry, &thread->todo);
3085 } else {
3086 list_add_tail(&death->work.entry, &proc->todo);
3087 wake_up_interruptible(&proc->wait);
3088 }
3089 }
3090 } break;
3091
3092 default:
56b468fc 3093 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
3094 proc->pid, thread->pid, cmd);
3095 return -EINVAL;
3096 }
3097 *consumed = ptr - buffer;
3098 }
3099 return 0;
3100}
3101
fb07ebc3
BP
3102static void binder_stat_br(struct binder_proc *proc,
3103 struct binder_thread *thread, uint32_t cmd)
355b0502 3104{
975a1ac9 3105 trace_binder_return(cmd);
355b0502 3106 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
0953c797
BJS
3107 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3108 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3109 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
3110 }
3111}
3112
3113static int binder_has_proc_work(struct binder_proc *proc,
3114 struct binder_thread *thread)
3115{
08dabcee 3116 return !list_empty(&proc->todo) || thread->looper_need_return;
355b0502
GKH
3117}
3118
3119static int binder_has_thread_work(struct binder_thread *thread)
3120{
26549d17 3121 return !list_empty(&thread->todo) || thread->looper_need_return;
355b0502
GKH
3122}
3123
26b47d8a
TK
3124static int binder_put_node_cmd(struct binder_proc *proc,
3125 struct binder_thread *thread,
3126 void __user **ptrp,
3127 binder_uintptr_t node_ptr,
3128 binder_uintptr_t node_cookie,
3129 int node_debug_id,
3130 uint32_t cmd, const char *cmd_name)
3131{
3132 void __user *ptr = *ptrp;
3133
3134 if (put_user(cmd, (uint32_t __user *)ptr))
3135 return -EFAULT;
3136 ptr += sizeof(uint32_t);
3137
3138 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3139 return -EFAULT;
3140 ptr += sizeof(binder_uintptr_t);
3141
3142 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3143 return -EFAULT;
3144 ptr += sizeof(binder_uintptr_t);
3145
3146 binder_stat_br(proc, thread, cmd);
3147 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3148 proc->pid, thread->pid, cmd_name, node_debug_id,
3149 (u64)node_ptr, (u64)node_cookie);
3150
3151 *ptrp = ptr;
3152 return 0;
3153}
3154
355b0502
GKH
3155static int binder_thread_read(struct binder_proc *proc,
3156 struct binder_thread *thread,
da49889d
AH
3157 binder_uintptr_t binder_buffer, size_t size,
3158 binder_size_t *consumed, int non_block)
355b0502 3159{
da49889d 3160 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3161 void __user *ptr = buffer + *consumed;
3162 void __user *end = buffer + size;
3163
3164 int ret = 0;
3165 int wait_for_proc_work;
3166
3167 if (*consumed == 0) {
3168 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3169 return -EFAULT;
3170 ptr += sizeof(uint32_t);
3171 }
3172
3173retry:
3174 wait_for_proc_work = thread->transaction_stack == NULL &&
3175 list_empty(&thread->todo);
3176
355b0502
GKH
3177 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3178 if (wait_for_proc_work)
3179 proc->ready_threads++;
975a1ac9
AH
3180
3181 binder_unlock(__func__);
3182
3183 trace_binder_wait_for_work(wait_for_proc_work,
3184 !!thread->transaction_stack,
3185 !list_empty(&thread->todo));
355b0502
GKH
3186 if (wait_for_proc_work) {
3187 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3188 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 3189 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
3190 proc->pid, thread->pid, thread->looper);
3191 wait_event_interruptible(binder_user_error_wait,
3192 binder_stop_on_user_error < 2);
3193 }
3194 binder_set_nice(proc->default_priority);
3195 if (non_block) {
3196 if (!binder_has_proc_work(proc, thread))
3197 ret = -EAGAIN;
3198 } else
e2610b26 3199 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
355b0502
GKH
3200 } else {
3201 if (non_block) {
3202 if (!binder_has_thread_work(thread))
3203 ret = -EAGAIN;
3204 } else
e2610b26 3205 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
355b0502 3206 }
975a1ac9
AH
3207
3208 binder_lock(__func__);
3209
355b0502
GKH
3210 if (wait_for_proc_work)
3211 proc->ready_threads--;
3212 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3213
3214 if (ret)
3215 return ret;
3216
3217 while (1) {
3218 uint32_t cmd;
3219 struct binder_transaction_data tr;
3220 struct binder_work *w;
3221 struct binder_transaction *t = NULL;
7a4408c6 3222 struct binder_thread *t_from;
355b0502 3223
ed29721e 3224 binder_inner_proc_lock(proc);
395262a9
DV
3225 if (!list_empty(&thread->todo)) {
3226 w = list_first_entry(&thread->todo, struct binder_work,
3227 entry);
3228 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
3229 w = list_first_entry(&proc->todo, struct binder_work,
3230 entry);
3231 } else {
3232 /* no data added */
08dabcee 3233 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
3234 goto retry;
3235 break;
3236 }
3237
ed29721e
TK
3238 if (end - ptr < sizeof(tr) + 4) {
3239 binder_inner_proc_unlock(proc);
355b0502 3240 break;
ed29721e
TK
3241 }
3242 list_del_init(&w->entry);
355b0502
GKH
3243
3244 switch (w->type) {
3245 case BINDER_WORK_TRANSACTION: {
ed29721e 3246 binder_inner_proc_unlock(proc);
355b0502
GKH
3247 t = container_of(w, struct binder_transaction, work);
3248 } break;
26549d17
TK
3249 case BINDER_WORK_RETURN_ERROR: {
3250 struct binder_error *e = container_of(
3251 w, struct binder_error, work);
3252
3253 WARN_ON(e->cmd == BR_OK);
ed29721e 3254 binder_inner_proc_unlock(proc);
26549d17
TK
3255 if (put_user(e->cmd, (uint32_t __user *)ptr))
3256 return -EFAULT;
3257 e->cmd = BR_OK;
3258 ptr += sizeof(uint32_t);
3259
3260 binder_stat_br(proc, thread, cmd);
26549d17 3261 } break;
355b0502 3262 case BINDER_WORK_TRANSACTION_COMPLETE: {
ed29721e 3263 binder_inner_proc_unlock(proc);
355b0502
GKH
3264 cmd = BR_TRANSACTION_COMPLETE;
3265 if (put_user(cmd, (uint32_t __user *)ptr))
3266 return -EFAULT;
3267 ptr += sizeof(uint32_t);
3268
3269 binder_stat_br(proc, thread, cmd);
3270 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 3271 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502 3272 proc->pid, thread->pid);
355b0502
GKH
3273 kfree(w);
3274 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3275 } break;
3276 case BINDER_WORK_NODE: {
3277 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
3278 int strong, weak;
3279 binder_uintptr_t node_ptr = node->ptr;
3280 binder_uintptr_t node_cookie = node->cookie;
3281 int node_debug_id = node->debug_id;
3282 int has_weak_ref;
3283 int has_strong_ref;
3284 void __user *orig_ptr = ptr;
3285
3286 BUG_ON(proc != node->proc);
3287 strong = node->internal_strong_refs ||
3288 node->local_strong_refs;
3289 weak = !hlist_empty(&node->refs) ||
adc18842
TK
3290 node->local_weak_refs ||
3291 node->tmp_refs || strong;
26b47d8a
TK
3292 has_strong_ref = node->has_strong_ref;
3293 has_weak_ref = node->has_weak_ref;
3294
3295 if (weak && !has_weak_ref) {
355b0502
GKH
3296 node->has_weak_ref = 1;
3297 node->pending_weak_ref = 1;
3298 node->local_weak_refs++;
26b47d8a
TK
3299 }
3300 if (strong && !has_strong_ref) {
355b0502
GKH
3301 node->has_strong_ref = 1;
3302 node->pending_strong_ref = 1;
3303 node->local_strong_refs++;
26b47d8a
TK
3304 }
3305 if (!strong && has_strong_ref)
355b0502 3306 node->has_strong_ref = 0;
26b47d8a 3307 if (!weak && has_weak_ref)
355b0502 3308 node->has_weak_ref = 0;
26b47d8a
TK
3309 if (!weak && !strong) {
3310 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3311 "%d:%d node %d u%016llx c%016llx deleted\n",
3312 proc->pid, thread->pid,
3313 node_debug_id,
3314 (u64)node_ptr,
3315 (u64)node_cookie);
3316 rb_erase(&node->rb_node, &proc->nodes);
ed29721e
TK
3317 binder_inner_proc_unlock(proc);
3318 binder_free_node(node);
3319 } else
3320 binder_inner_proc_unlock(proc);
3321
26b47d8a
TK
3322 if (weak && !has_weak_ref)
3323 ret = binder_put_node_cmd(
3324 proc, thread, &ptr, node_ptr,
3325 node_cookie, node_debug_id,
3326 BR_INCREFS, "BR_INCREFS");
3327 if (!ret && strong && !has_strong_ref)
3328 ret = binder_put_node_cmd(
3329 proc, thread, &ptr, node_ptr,
3330 node_cookie, node_debug_id,
3331 BR_ACQUIRE, "BR_ACQUIRE");
3332 if (!ret && !strong && has_strong_ref)
3333 ret = binder_put_node_cmd(
3334 proc, thread, &ptr, node_ptr,
3335 node_cookie, node_debug_id,
3336 BR_RELEASE, "BR_RELEASE");
3337 if (!ret && !weak && has_weak_ref)
3338 ret = binder_put_node_cmd(
3339 proc, thread, &ptr, node_ptr,
3340 node_cookie, node_debug_id,
3341 BR_DECREFS, "BR_DECREFS");
3342 if (orig_ptr == ptr)
3343 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3344 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3345 proc->pid, thread->pid,
3346 node_debug_id,
3347 (u64)node_ptr,
3348 (u64)node_cookie);
3349 if (ret)
3350 return ret;
355b0502
GKH
3351 } break;
3352 case BINDER_WORK_DEAD_BINDER:
3353 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3354 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3355 struct binder_ref_death *death;
3356 uint32_t cmd;
3357
3358 death = container_of(w, struct binder_ref_death, work);
3359 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3360 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3361 else
3362 cmd = BR_DEAD_BINDER;
ed29721e
TK
3363 /*
3364 * TODO: there is a race condition between
3365 * death notification requests and delivery
3366 * of the notifications. This will be handled
3367 * in a later patch.
3368 */
3369 binder_inner_proc_unlock(proc);
355b0502
GKH
3370 if (put_user(cmd, (uint32_t __user *)ptr))
3371 return -EFAULT;
3372 ptr += sizeof(uint32_t);
da49889d
AH
3373 if (put_user(death->cookie,
3374 (binder_uintptr_t __user *)ptr))
355b0502 3375 return -EFAULT;
da49889d 3376 ptr += sizeof(binder_uintptr_t);
89334ab4 3377 binder_stat_br(proc, thread, cmd);
355b0502 3378 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 3379 "%d:%d %s %016llx\n",
355b0502
GKH
3380 proc->pid, thread->pid,
3381 cmd == BR_DEAD_BINDER ?
3382 "BR_DEAD_BINDER" :
3383 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
da49889d 3384 (u64)death->cookie);
355b0502
GKH
3385
3386 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
355b0502
GKH
3387 kfree(death);
3388 binder_stats_deleted(BINDER_STAT_DEATH);
ed29721e
TK
3389 } else {
3390 binder_inner_proc_lock(proc);
3391 list_add_tail(&w->entry,
3392 &proc->delivered_death);
3393 binder_inner_proc_unlock(proc);
3394 }
355b0502
GKH
3395 if (cmd == BR_DEAD_BINDER)
3396 goto done; /* DEAD_BINDER notifications can cause transactions */
3397 } break;
3398 }
3399
3400 if (!t)
3401 continue;
3402
3403 BUG_ON(t->buffer == NULL);
3404 if (t->buffer->target_node) {
3405 struct binder_node *target_node = t->buffer->target_node;
10f62861 3406
355b0502
GKH
3407 tr.target.ptr = target_node->ptr;
3408 tr.cookie = target_node->cookie;
3409 t->saved_priority = task_nice(current);
3410 if (t->priority < target_node->min_priority &&
3411 !(t->flags & TF_ONE_WAY))
3412 binder_set_nice(t->priority);
3413 else if (!(t->flags & TF_ONE_WAY) ||
3414 t->saved_priority > target_node->min_priority)
3415 binder_set_nice(target_node->min_priority);
3416 cmd = BR_TRANSACTION;
3417 } else {
da49889d
AH
3418 tr.target.ptr = 0;
3419 tr.cookie = 0;
355b0502
GKH
3420 cmd = BR_REPLY;
3421 }
3422 tr.code = t->code;
3423 tr.flags = t->flags;
4a2ebb93 3424 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 3425
7a4408c6
TK
3426 t_from = binder_get_txn_from(t);
3427 if (t_from) {
3428 struct task_struct *sender = t_from->proc->tsk;
10f62861 3429
355b0502 3430 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 3431 task_active_pid_ns(current));
355b0502
GKH
3432 } else {
3433 tr.sender_pid = 0;
3434 }
3435
3436 tr.data_size = t->buffer->data_size;
3437 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
3438 tr.data.ptr.buffer = (binder_uintptr_t)
3439 ((uintptr_t)t->buffer->data +
3440 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
3441 tr.data.ptr.offsets = tr.data.ptr.buffer +
3442 ALIGN(t->buffer->data_size,
3443 sizeof(void *));
3444
7a4408c6
TK
3445 if (put_user(cmd, (uint32_t __user *)ptr)) {
3446 if (t_from)
3447 binder_thread_dec_tmpref(t_from);
355b0502 3448 return -EFAULT;
7a4408c6 3449 }
355b0502 3450 ptr += sizeof(uint32_t);
7a4408c6
TK
3451 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3452 if (t_from)
3453 binder_thread_dec_tmpref(t_from);
355b0502 3454 return -EFAULT;
7a4408c6 3455 }
355b0502
GKH
3456 ptr += sizeof(tr);
3457
975a1ac9 3458 trace_binder_transaction_received(t);
355b0502
GKH
3459 binder_stat_br(proc, thread, cmd);
3460 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 3461 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
3462 proc->pid, thread->pid,
3463 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3464 "BR_REPLY",
7a4408c6
TK
3465 t->debug_id, t_from ? t_from->proc->pid : 0,
3466 t_from ? t_from->pid : 0, cmd,
355b0502 3467 t->buffer->data_size, t->buffer->offsets_size,
da49889d 3468 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 3469
7a4408c6
TK
3470 if (t_from)
3471 binder_thread_dec_tmpref(t_from);
355b0502
GKH
3472 t->buffer->allow_user_free = 1;
3473 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3474 t->to_parent = thread->transaction_stack;
3475 t->to_thread = thread;
3476 thread->transaction_stack = t;
3477 } else {
b6d282ce 3478 binder_free_transaction(t);
355b0502
GKH
3479 }
3480 break;
3481 }
3482
3483done:
3484
3485 *consumed = ptr - buffer;
3486 if (proc->requested_threads + proc->ready_threads == 0 &&
3487 proc->requested_threads_started < proc->max_threads &&
3488 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3489 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
3490 /*spawn a new thread if we leave this out */) {
3491 proc->requested_threads++;
3492 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3493 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
3494 proc->pid, thread->pid);
3495 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3496 return -EFAULT;
89334ab4 3497 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
355b0502
GKH
3498 }
3499 return 0;
3500}
3501
/*
 * Dispose of every binder_work item still queued on @list (used when a
 * thread or process is torn down and its todo list can no longer be
 * serviced).  Synchronous transactions whose sender is still waiting
 * get a BR_DEAD_REPLY; everything else is logged and freed.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				/* synchronous: tell the sender it is dead */
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			/*
			 * binder_error is embedded in its binder_thread
			 * (see binder_get_thread()), so nothing is freed.
			 */
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* unknown items cannot be freed safely; leak and log */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}
3557
/*
 * Find the binder_thread for the current task in proc->threads (an
 * rbtree keyed on pid), creating, initializing and inserting a new one
 * on first use.  Returns NULL only when allocation of a new thread
 * fails.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	/* standard rbtree lookup; remember the link slot for insertion */
	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		/* not found: allocate and insert at the remembered slot */
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		atomic_set(&thread->tmp_ref, 0);
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		/* read by binder_has_proc_work()/binder_has_thread_work() */
		thread->looper_need_return = true;
		/* both embedded error work items start out idle (BR_OK) */
		thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
		thread->return_error.cmd = BR_OK;
		thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
		thread->reply_error.cmd = BR_OK;
	}
	return thread;
}
3595
7a4408c6
TK
/*
 * Free a binder_proc.  Both work lists must already be empty; release
 * the binder allocator state, drop the task_struct reference, update
 * stats and free the structure itself.
 */
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
3605
/*
 * Free a binder_thread.  Its todo list must already be empty; the
 * binder_proc_dec_tmpref() here balances the proc->tmp_ref++ taken in
 * binder_thread_release().
 */
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
3613
/*
 * Tear down @thread: unlink it from proc->threads, mark it dead, and
 * walk its transaction stack severing every link that points at this
 * thread.  If the transaction on top of the stack targets this thread,
 * its sender is given a BR_DEAD_REPLY.  Returns the number of
 * transactions that were still active.
 */
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	/*
	 * Walk the stack hand-over-hand: t->lock is held while the entry
	 * is inspected, then released (via last_t) only after the next
	 * entry's lock has been identified.
	 */
	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* incoming transaction: detach target-side state */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* outgoing transaction: detach sender side */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	/* drop our temporary reference; may free the thread */
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
3676
3677static unsigned int binder_poll(struct file *filp,
3678 struct poll_table_struct *wait)
3679{
3680 struct binder_proc *proc = filp->private_data;
3681 struct binder_thread *thread = NULL;
3682 int wait_for_proc_work;
3683
975a1ac9
AH
3684 binder_lock(__func__);
3685
355b0502
GKH
3686 thread = binder_get_thread(proc);
3687
3688 wait_for_proc_work = thread->transaction_stack == NULL &&
26549d17 3689 list_empty(&thread->todo);
975a1ac9
AH
3690
3691 binder_unlock(__func__);
355b0502
GKH
3692
3693 if (wait_for_proc_work) {
3694 if (binder_has_proc_work(proc, thread))
3695 return POLLIN;
3696 poll_wait(filp, &proc->wait, wait);
3697 if (binder_has_proc_work(proc, thread))
3698 return POLLIN;
3699 } else {
3700 if (binder_has_thread_work(thread))
3701 return POLLIN;
3702 poll_wait(filp, &thread->wait, wait);
3703 if (binder_has_thread_work(thread))
3704 return POLLIN;
3705 }
3706 return 0;
3707}
3708
78260ac6
TR
/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:	file for the binder proc
 * @cmd:	ioctl command (used only for its size field)
 * @arg:	user pointer to a struct binder_write_read
 * @thread:	calling binder thread (already looked up by binder_ioctl())
 *
 * Processes the write buffer first, then the read buffer.  On every
 * error path the (possibly updated) bwr consumed counters are copied
 * back so userspace can tell how far processing got; this copy-back
 * ordering is part of the userspace ABI.
 *
 * Return: 0 on success, -EINVAL/-EFAULT or an error from the
 * write/read helpers otherwise.
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/* write failed: report nothing consumed on the read side */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* the read may have left proc-wide work; wake waiting threads */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
3772
/*
 * binder_ioctl_set_ctx_mgr() - handle the BINDER_SET_CONTEXT_MGR ioctl
 * @filp:	file for the binder proc claiming the context-manager role
 *
 * Installs the calling process as the context manager for its binder
 * context.  Only one manager per context is allowed, and if a manager
 * uid was already recorded the caller's euid must match it.  All
 * checks and the node installation happen under context_mgr_node_lock.
 *
 * Return: 0 on success, -EBUSY if already set, -EPERM on uid mismatch
 * or security denial, -ENOMEM if the node cannot be allocated.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	/* LSM hook gets a veto before any state is changed */
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		/* first claim: remember the manager's euid for later checks */
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, 0, 0);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Pin the node with local strong/weak refs so the context
	 * manager node never drops to zero references.
	 */
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	/* drop the temporary ref held since binder_new_node() */
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
3817
355b0502
GKH
/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder
 * @filp:	file for the binder proc
 * @cmd:	ioctl command
 * @arg:	command argument (usually a user pointer)
 *
 * Looks up (or creates) the per-thread state, then dispatches to the
 * per-command handlers.  The whole dispatch runs under the global
 * binder lock; looper_need_return is cleared on the way out so a
 * thread interrupted by binder_deferred_flush() resumes normally.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* debugging aid: stall userspace while a user error is being inspected */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		/* thread may be freed by the release; don't touch it below */
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
3895
/*
 * binder_vma_open() - vm_ops->open hook for the binder mapping
 *
 * Only emits a debug trace; VM_DONTCOPY is set in binder_mmap(), so
 * no per-open bookkeeping is needed here.
 */
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
3906
/*
 * binder_vma_close() - vm_ops->close hook for the binder mapping
 *
 * Notifies the allocator that the mapping is going away, then defers
 * the put of the files struct (deferred because close can run in a
 * context where taking the binder lock directly is not safe).
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
3919
11bac800 3920static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
3921{
3922 return VM_FAULT_SIGBUS;
3923}
3924
7cbea8dc 3925static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
3926 .open = binder_vma_open,
3927 .close = binder_vma_close,
ddac7d5f 3928 .fault = binder_vm_fault,
355b0502
GKH
3929};
3930
19c98724
TK
/*
 * binder_mmap() - map the binder buffer area into the caller
 * @filp:	file for the binder proc
 * @vma:	vma passed in by the mm layer
 *
 * Caps the mapping at 4MB, rejects writable/copy-on-fork mappings,
 * and hands the vma to the allocator.  Only the group leader that
 * opened the device may mmap it.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	/* silently clamp oversized requests to the 4MB maximum */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* mapping must not be inherited across fork and never made writable */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
3969
/*
 * binder_open() - file open handler for /dev/binder (and friends)
 * @nodp:	inode (unused)
 * @filp:	file being opened; private_data initially points at the
 *		miscdevice and is replaced by the new binder_proc
 *
 * Allocates and initializes the per-process state, registers it on
 * the global proc list, and creates the per-pid debugfs entry.
 *
 * Return: 0 on success, -ENOMEM if the proc cannot be allocated.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	/* the proc is owned by the thread-group leader, not the opener */
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	/* recover which binder device (context) this file belongs to */
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
4025
/*
 * binder_flush() - file flush handler
 *
 * Defers the actual flush to the workqueue; binder_deferred_flush()
 * does the thread wakeups under the binder lock.
 */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
4034
4035static void binder_deferred_flush(struct binder_proc *proc)
4036{
4037 struct rb_node *n;
4038 int wake_count = 0;
10f62861 4039
355b0502
GKH
4040 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4041 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 4042
08dabcee 4043 thread->looper_need_return = true;
355b0502
GKH
4044 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4045 wake_up_interruptible(&thread->wait);
4046 wake_count++;
4047 }
4048 }
4049 wake_up_interruptible_all(&proc->wait);
4050
4051 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4052 "binder_flush: %d woke %d threads\n", proc->pid,
4053 wake_count);
4054}
4055
/*
 * binder_release() - file release handler
 *
 * Removes the debugfs entry immediately, then defers the heavyweight
 * teardown (threads, nodes, refs) to binder_deferred_release().
 */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
4065
008fa749
ME
/*
 * binder_node_release() - release a node of a dying proc
 * @node:	node being released (caller holds a temporary ref)
 * @refs:	running count of incoming refs, accumulated by the caller
 *
 * If nobody else references the node it is freed outright; otherwise
 * it is detached from the proc, moved to the global dead-nodes list,
 * and DEAD_BINDER work is queued for every ref with a registered
 * death notification.
 *
 * Return: the updated incoming-refs count.
 */
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(&node->async_todo);

	binder_inner_proc_lock(proc);
	list_del_init(&node->work.entry);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	/* no refs besides the caller's temporary one: free immediately */
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_free_node(node);

		return refs;
	}

	/* detach from the dying proc and neutralize its local refs */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		/* deliver the death notification to the ref's proc */
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	/* drop the caller's temporary ref */
	binder_put_node(node);

	return refs;
}
4120
355b0502
GKH
/*
 * binder_deferred_release() - deferred part of binder_release()
 * @proc:	proc being torn down (frees it via the final tmpref put)
 *
 * Unregisters the proc, clears the context-manager pointer if this
 * proc owned it, then releases all threads, nodes, and refs before
 * draining any remaining work lists.  The order matters: threads
 * first (to unwind transactions), then nodes, then refs.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref(node);
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref(ref);
		binder_free_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	/* drop the ref taken above; may free proc */
	binder_proc_dec_tmpref(proc);
}
4196
/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list
 * @work:	the binder_deferred_work item (unused beyond scheduling)
 *
 * Pops one proc at a time off the deferred list and performs the
 * work bits accumulated for it (put-files, flush, release).  Loops
 * until the list is empty.  put_files_struct() is called outside
 * the binder lock since it may sleep and re-enter the VFS.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		/* detach the files struct now, put it after dropping the lock */
		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
/* Work item that drains binder_deferred_list; queued by binder_defer_work(). */
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4238
4239static void
4240binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4241{
4242 mutex_lock(&binder_deferred_lock);
4243 proc->deferred_work |= defer;
4244 if (hlist_unhashed(&proc->deferred_work_node)) {
4245 hlist_add_head(&proc->deferred_work_node,
4246 &binder_deferred_list);
1beba52d 4247 schedule_work(&binder_deferred_work);
355b0502
GKH
4248 }
4249 mutex_unlock(&binder_deferred_lock);
4250}
4251
5249f488
AH
/*
 * print_binder_transaction() - dump one transaction to a seq_file
 * @m:		output seq_file
 * @prefix:	indentation/label prefix for the line
 * @t:		transaction to print
 *
 * Snapshot of the endpoint fields is taken under t->lock.
 *
 * NOTE(review): t->buffer is read after t->lock is dropped — this
 * looks racy against buffer release; presumably callers hold the
 * global binder lock, which serializes it. Confirm before changing.
 */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	spin_lock(&t->lock);
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
4277
5249f488
AH
/*
 * print_binder_work() - dump one queued work item to a seq_file
 * @m:			output seq_file
 * @prefix:		prefix for simple one-line entries
 * @transaction_prefix:	prefix used when the work is a transaction
 * @w:			work item to print
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		/* the work struct is embedded in the transaction */
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
4320
5249f488
AH
/*
 * print_binder_thread() - dump one thread's state to a seq_file
 * @m:		output seq_file
 * @thread:	thread to print
 * @print_always: if false, the header is discarded when the thread
 *		has no transactions and no pending work (the seq_file
 *		write position is rewound to drop the empty entry)
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	/* walk the transaction stack; direction decides which parent to follow */
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* thread on neither side: corrupt stack entry */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	/* nothing was printed beyond the header: drop the header too */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
4356
5249f488 4357static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
4358{
4359 struct binder_ref *ref;
355b0502
GKH
4360 struct binder_work *w;
4361 int count;
4362
4363 count = 0;
b67bfe0d 4364 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
4365 count++;
4366
adc18842 4367 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
da49889d 4368 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
4369 node->has_strong_ref, node->has_weak_ref,
4370 node->local_strong_refs, node->local_weak_refs,
adc18842 4371 node->internal_strong_refs, count, node->tmp_refs);
355b0502 4372 if (count) {
5249f488 4373 seq_puts(m, " proc");
b67bfe0d 4374 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 4375 seq_printf(m, " %d", ref->proc->pid);
355b0502 4376 }
5249f488
AH
4377 seq_puts(m, "\n");
4378 list_for_each_entry(w, &node->async_todo, entry)
4379 print_binder_work(m, " ",
4380 " pending async transaction", w);
355b0502
GKH
4381}
4382
5249f488 4383static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 4384{
372e3147
TK
4385 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4386 ref->data.debug_id, ref->data.desc,
4387 ref->node->proc ? "" : "dead ",
4388 ref->node->debug_id, ref->data.strong,
4389 ref->data.weak, ref->death);
355b0502
GKH
4390}
4391
5249f488
AH
/*
 * print_binder_proc() - dump a proc's state to a seq_file
 * @m:		output seq_file
 * @proc:	proc to print
 * @print_all:	nonzero prints every thread/node/ref; zero prints only
 *		non-empty threads and async nodes, and rewinds the
 *		seq_file to drop the header if nothing else was printed
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	/* only note delivered-death once, regardless of list length */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
4430
167bccbd 4431static const char * const binder_return_strings[] = {
355b0502
GKH
4432 "BR_ERROR",
4433 "BR_OK",
4434 "BR_TRANSACTION",
4435 "BR_REPLY",
4436 "BR_ACQUIRE_RESULT",
4437 "BR_DEAD_REPLY",
4438 "BR_TRANSACTION_COMPLETE",
4439 "BR_INCREFS",
4440 "BR_ACQUIRE",
4441 "BR_RELEASE",
4442 "BR_DECREFS",
4443 "BR_ATTEMPT_ACQUIRE",
4444 "BR_NOOP",
4445 "BR_SPAWN_LOOPER",
4446 "BR_FINISHED",
4447 "BR_DEAD_BINDER",
4448 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4449 "BR_FAILED_REPLY"
4450};
4451
167bccbd 4452static const char * const binder_command_strings[] = {
355b0502
GKH
4453 "BC_TRANSACTION",
4454 "BC_REPLY",
4455 "BC_ACQUIRE_RESULT",
4456 "BC_FREE_BUFFER",
4457 "BC_INCREFS",
4458 "BC_ACQUIRE",
4459 "BC_RELEASE",
4460 "BC_DECREFS",
4461 "BC_INCREFS_DONE",
4462 "BC_ACQUIRE_DONE",
4463 "BC_ATTEMPT_ACQUIRE",
4464 "BC_REGISTER_LOOPER",
4465 "BC_ENTER_LOOPER",
4466 "BC_EXIT_LOOPER",
4467 "BC_REQUEST_DEATH_NOTIFICATION",
4468 "BC_CLEAR_DEATH_NOTIFICATION",
7980240b
MC
4469 "BC_DEAD_BINDER_DONE",
4470 "BC_TRANSACTION_SG",
4471 "BC_REPLY_SG",
355b0502
GKH
4472};
4473
167bccbd 4474static const char * const binder_objstat_strings[] = {
355b0502
GKH
4475 "proc",
4476 "thread",
4477 "node",
4478 "ref",
4479 "death",
4480 "transaction",
4481 "transaction_complete"
4482};
4483
5249f488
AH
/*
 * print_binder_stats() - dump command/return/object counters
 * @m:		output seq_file
 * @prefix:	indentation prefix for each line
 * @stats:	counters to print (global or per-proc)
 *
 * Zero counters are skipped. The BUILD_BUG_ONs keep the string
 * tables in sync with the counter arrays at compile time.
 */
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		/* "active" is the live count; "total" is lifetime creations */
		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
4525
5249f488
AH
/*
 * print_binder_proc_stats() - dump summary statistics for one proc
 * @m:		output seq_file
 * @proc:	proc whose counters are printed
 *
 * Counts threads/nodes/refs by walking the rbtrees, reports allocator
 * usage, counts pending transactions on the proc todo list, and ends
 * with the proc's command/return/object counters.
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads,
			binder_alloc_get_free_async_space(&proc->alloc));
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	/* aggregate strong/weak counts over all outgoing refs */
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	/* only BINDER_WORK_TRANSACTION items count as pending transactions */
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
4578
4579
5249f488 4580static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
4581{
4582 struct binder_proc *proc;
355b0502 4583 struct binder_node *node;
355b0502 4584
1cf29cf4 4585 binder_lock(__func__);
355b0502 4586
5249f488 4587 seq_puts(m, "binder state:\n");
355b0502 4588
c44b1231 4589 spin_lock(&binder_dead_nodes_lock);
355b0502 4590 if (!hlist_empty(&binder_dead_nodes))
5249f488 4591 seq_puts(m, "dead nodes:\n");
b67bfe0d 4592 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
5249f488 4593 print_binder_node(m, node);
c44b1231 4594 spin_unlock(&binder_dead_nodes_lock);
355b0502 4595
c44b1231 4596 mutex_lock(&binder_procs_lock);
b67bfe0d 4597 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4598 print_binder_proc(m, proc, 1);
c44b1231 4599 mutex_unlock(&binder_procs_lock);
1cf29cf4 4600 binder_unlock(__func__);
5249f488 4601 return 0;
355b0502
GKH
4602}
4603
5249f488 4604static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
4605{
4606 struct binder_proc *proc;
355b0502 4607
1cf29cf4 4608 binder_lock(__func__);
355b0502 4609
5249f488 4610 seq_puts(m, "binder stats:\n");
355b0502 4611
5249f488 4612 print_binder_stats(m, "", &binder_stats);
355b0502 4613
c44b1231 4614 mutex_lock(&binder_procs_lock);
b67bfe0d 4615 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4616 print_binder_proc_stats(m, proc);
c44b1231 4617 mutex_unlock(&binder_procs_lock);
1cf29cf4 4618 binder_unlock(__func__);
5249f488 4619 return 0;
355b0502
GKH
4620}
4621
5249f488 4622static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
4623{
4624 struct binder_proc *proc;
355b0502 4625
1cf29cf4 4626 binder_lock(__func__);
355b0502 4627
5249f488 4628 seq_puts(m, "binder transactions:\n");
c44b1231 4629 mutex_lock(&binder_procs_lock);
b67bfe0d 4630 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4631 print_binder_proc(m, proc, 0);
c44b1231 4632 mutex_unlock(&binder_procs_lock);
1cf29cf4 4633 binder_unlock(__func__);
5249f488 4634 return 0;
355b0502
GKH
4635}
4636
5249f488 4637static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 4638{
83050a4e 4639 struct binder_proc *itr;
14db3181 4640 int pid = (unsigned long)m->private;
355b0502 4641
1cf29cf4 4642 binder_lock(__func__);
83050a4e 4643
c44b1231 4644 mutex_lock(&binder_procs_lock);
83050a4e 4645 hlist_for_each_entry(itr, &binder_procs, proc_node) {
14db3181
MC
4646 if (itr->pid == pid) {
4647 seq_puts(m, "binder proc state:\n");
4648 print_binder_proc(m, itr, 1);
83050a4e
RA
4649 }
4650 }
c44b1231
TK
4651 mutex_unlock(&binder_procs_lock);
4652
1cf29cf4 4653 binder_unlock(__func__);
5249f488 4654 return 0;
355b0502
GKH
4655}
4656
5249f488 4657static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
4658 struct binder_transaction_log_entry *e)
4659{
d99c7333
TK
4660 int debug_id = READ_ONCE(e->debug_id_done);
4661 /*
4662 * read barrier to guarantee debug_id_done read before
4663 * we print the log values
4664 */
4665 smp_rmb();
5249f488 4666 seq_printf(m,
d99c7333 4667 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
4668 e->debug_id, (e->call_type == 2) ? "reply" :
4669 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
14db3181 4670 e->from_thread, e->to_proc, e->to_thread, e->context_name,
57ada2fb
TK
4671 e->to_node, e->target_handle, e->data_size, e->offsets_size,
4672 e->return_error, e->return_error_param,
4673 e->return_error_line);
d99c7333
TK
4674 /*
4675 * read-barrier to guarantee read of debug_id_done after
4676 * done printing the fields of the entry
4677 */
4678 smp_rmb();
4679 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
4680 "\n" : " (incomplete)\n");
355b0502
GKH
4681}
4682
/**
 * binder_transaction_log_show() - seq_file show handler for the
 *				   (failed_)transaction_log debugfs files
 * @m:      seq_file to print into; m->private selects which log
 * @unused: unused
 *
 * The log is a fixed-size ring buffer indexed by the atomic log->cur
 * counter.  A snapshot of the counter is taken once; entries are then
 * printed oldest-first.  Until the ring has wrapped (log->full clear and
 * fewer writes than slots) printing starts at slot 0 and covers only the
 * written entries; after wrapping it starts at the slot after the most
 * recent write and covers the whole ring.
 *
 * Return: always 0 (seq_file convention).
 */
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	/* log_cur is the index of the last written slot, so +1 = writes */
	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
4703
/* File operations for the /dev/binder* character devices. */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	/* ioctl ABI is compat-clean, so the native handler serves 32-bit too */
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
4714
/*
 * Instantiate the single-open debugfs file_operations (binder_<name>_fops)
 * wrapping each binder_<name>_show() seq_file handler above.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
4719
/**
 * init_binder_device() - allocate and register one binder misc device
 * @name: device node name (one token of the binder_devices module param)
 *
 * Allocates a binder_device, registers its miscdevice with a dynamic
 * minor, initializes the per-device context, and links the device onto
 * the global binder_devices list.  @name is not copied: miscdev.name
 * and context.name keep pointers into it, so the caller must keep the
 * string alive for the lifetime of the device.
 *
 * Return: 0 on success; negative errno from misc_register() on failure
 * (the binder_device allocation is freed in that case).
 */
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
4747
355b0502
GKH
4748static int __init binder_init(void)
4749{
4750 int ret;
ac4812c5
MC
4751 char *device_name, *device_names;
4752 struct binder_device *device;
4753 struct hlist_node *tmp;
355b0502 4754
d99c7333
TK
4755 atomic_set(&binder_transaction_log.cur, ~0U);
4756 atomic_set(&binder_transaction_log_failed.cur, ~0U);
4757
16b66554
AH
4758 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4759 if (binder_debugfs_dir_entry_root)
4760 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4761 binder_debugfs_dir_entry_root);
ac4812c5 4762
16b66554
AH
4763 if (binder_debugfs_dir_entry_root) {
4764 debugfs_create_file("state",
4765 S_IRUGO,
4766 binder_debugfs_dir_entry_root,
4767 NULL,
4768 &binder_state_fops);
4769 debugfs_create_file("stats",
4770 S_IRUGO,
4771 binder_debugfs_dir_entry_root,
4772 NULL,
4773 &binder_stats_fops);
4774 debugfs_create_file("transactions",
4775 S_IRUGO,
4776 binder_debugfs_dir_entry_root,
4777 NULL,
4778 &binder_transactions_fops);
4779 debugfs_create_file("transaction_log",
4780 S_IRUGO,
4781 binder_debugfs_dir_entry_root,
4782 &binder_transaction_log,
4783 &binder_transaction_log_fops);
4784 debugfs_create_file("failed_transaction_log",
4785 S_IRUGO,
4786 binder_debugfs_dir_entry_root,
4787 &binder_transaction_log_failed,
4788 &binder_transaction_log_fops);
355b0502 4789 }
ac4812c5
MC
4790
4791 /*
4792 * Copy the module_parameter string, because we don't want to
4793 * tokenize it in-place.
4794 */
4795 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4796 if (!device_names) {
4797 ret = -ENOMEM;
4798 goto err_alloc_device_names_failed;
4799 }
4800 strcpy(device_names, binder_devices_param);
4801
4802 while ((device_name = strsep(&device_names, ","))) {
4803 ret = init_binder_device(device_name);
4804 if (ret)
4805 goto err_init_binder_device_failed;
4806 }
4807
4808 return ret;
4809
4810err_init_binder_device_failed:
4811 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4812 misc_deregister(&device->miscdev);
4813 hlist_del(&device->hlist);
4814 kfree(device);
4815 }
4816err_alloc_device_names_failed:
4817 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4818
355b0502
GKH
4819 return ret;
4820}
4821
/* Register at device-initcall time; binder is core plumbing, not a module */
device_initcall(binder_init);

/* Emit the tracepoint definitions declared in binder_trace.h */
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");