]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/android/binder.c
binder: make sure target_node has strong ref
[mirror_ubuntu-jammy-kernel.git] / drivers / android / binder.c
CommitLineData
355b0502
GKH
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
56b468fc
AS
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
355b0502
GKH
20#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
e2610b26 23#include <linux/freezer.h>
355b0502
GKH
24#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
355b0502
GKH
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/nsproxy.h>
30#include <linux/poll.h>
16b66554 31#include <linux/debugfs.h>
355b0502 32#include <linux/rbtree.h>
3f07c014 33#include <linux/sched/signal.h>
6e84f315 34#include <linux/sched/mm.h>
5249f488 35#include <linux/seq_file.h>
355b0502 36#include <linux/uaccess.h>
17cf22c3 37#include <linux/pid_namespace.h>
79af7307 38#include <linux/security.h>
355b0502 39
9246a4a9
GKH
40#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41#define BINDER_IPC_32BIT 1
42#endif
43
44#include <uapi/linux/android/binder.h>
0c972a05 45#include "binder_alloc.h"
975a1ac9 46#include "binder_trace.h"
355b0502 47
975a1ac9 48static DEFINE_MUTEX(binder_main_lock);
c44b1231
TK
49
50static HLIST_HEAD(binder_deferred_list);
355b0502
GKH
51static DEFINE_MUTEX(binder_deferred_lock);
52
ac4812c5 53static HLIST_HEAD(binder_devices);
355b0502 54static HLIST_HEAD(binder_procs);
c44b1231
TK
55static DEFINE_MUTEX(binder_procs_lock);
56
355b0502 57static HLIST_HEAD(binder_dead_nodes);
c44b1231 58static DEFINE_SPINLOCK(binder_dead_nodes_lock);
355b0502 59
16b66554
AH
60static struct dentry *binder_debugfs_dir_entry_root;
61static struct dentry *binder_debugfs_dir_entry_proc;
656a800a 62static atomic_t binder_last_id;
355b0502 63
5249f488
AH
64#define BINDER_DEBUG_ENTRY(name) \
65static int binder_##name##_open(struct inode *inode, struct file *file) \
66{ \
16b66554 67 return single_open(file, binder_##name##_show, inode->i_private); \
5249f488
AH
68} \
69\
70static const struct file_operations binder_##name##_fops = { \
71 .owner = THIS_MODULE, \
72 .open = binder_##name##_open, \
73 .read = seq_read, \
74 .llseek = seq_lseek, \
75 .release = single_release, \
76}
77
78static int binder_proc_show(struct seq_file *m, void *unused);
79BINDER_DEBUG_ENTRY(proc);
355b0502
GKH
80
81/* This is only defined in include/asm-arm/sizes.h */
82#ifndef SZ_1K
83#define SZ_1K 0x400
84#endif
85
86#ifndef SZ_4M
87#define SZ_4M 0x400000
88#endif
89
90#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
91
92#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
93
94enum {
95 BINDER_DEBUG_USER_ERROR = 1U << 0,
96 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
97 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
98 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
99 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
100 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
101 BINDER_DEBUG_READ_WRITE = 1U << 6,
102 BINDER_DEBUG_USER_REFS = 1U << 7,
103 BINDER_DEBUG_THREADS = 1U << 8,
104 BINDER_DEBUG_TRANSACTION = 1U << 9,
105 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
106 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
107 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
19c98724 108 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
355b0502
GKH
109};
110static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
111 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
112module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
113
ac4812c5
MC
114static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
115module_param_named(devices, binder_devices_param, charp, 0444);
116
355b0502
GKH
117static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
118static int binder_stop_on_user_error;
119
120static int binder_set_stop_on_user_error(const char *val,
121 struct kernel_param *kp)
122{
123 int ret;
10f62861 124
355b0502
GKH
125 ret = param_set_int(val, kp);
126 if (binder_stop_on_user_error < 2)
127 wake_up(&binder_user_error_wait);
128 return ret;
129}
130module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
131 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
132
133#define binder_debug(mask, x...) \
134 do { \
135 if (binder_debug_mask & mask) \
258767fe 136 pr_info(x); \
355b0502
GKH
137 } while (0)
138
139#define binder_user_error(x...) \
140 do { \
141 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
258767fe 142 pr_info(x); \
355b0502
GKH
143 if (binder_stop_on_user_error) \
144 binder_stop_on_user_error = 2; \
145 } while (0)
146
feba3900
MC
147#define to_flat_binder_object(hdr) \
148 container_of(hdr, struct flat_binder_object, hdr)
149
150#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
151
7980240b
MC
152#define to_binder_buffer_object(hdr) \
153 container_of(hdr, struct binder_buffer_object, hdr)
154
def95c73
MC
155#define to_binder_fd_array_object(hdr) \
156 container_of(hdr, struct binder_fd_array_object, hdr)
157
355b0502
GKH
158enum binder_stat_types {
159 BINDER_STAT_PROC,
160 BINDER_STAT_THREAD,
161 BINDER_STAT_NODE,
162 BINDER_STAT_REF,
163 BINDER_STAT_DEATH,
164 BINDER_STAT_TRANSACTION,
165 BINDER_STAT_TRANSACTION_COMPLETE,
166 BINDER_STAT_COUNT
167};
168
169struct binder_stats {
0953c797
BJS
170 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
171 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
172 atomic_t obj_created[BINDER_STAT_COUNT];
173 atomic_t obj_deleted[BINDER_STAT_COUNT];
355b0502
GKH
174};
175
176static struct binder_stats binder_stats;
177
/* Account one destroyed object of @type in the global binder stats. */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}
182
/* Account one created object of @type in the global binder stats. */
static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
187
188struct binder_transaction_log_entry {
189 int debug_id;
d99c7333 190 int debug_id_done;
355b0502
GKH
191 int call_type;
192 int from_proc;
193 int from_thread;
194 int target_handle;
195 int to_proc;
196 int to_thread;
197 int to_node;
198 int data_size;
199 int offsets_size;
57ada2fb
TK
200 int return_error_line;
201 uint32_t return_error;
202 uint32_t return_error_param;
14db3181 203 const char *context_name;
355b0502
GKH
204};
205struct binder_transaction_log {
d99c7333
TK
206 atomic_t cur;
207 bool full;
355b0502
GKH
208 struct binder_transaction_log_entry entry[32];
209};
210static struct binder_transaction_log binder_transaction_log;
211static struct binder_transaction_log binder_transaction_log_failed;
212
/*
 * Reserve the next slot of the fixed-size, wrap-around transaction
 * log @log and return it zeroed.  Lockless: the slot index comes from
 * an atomic increment, so concurrent callers get distinct slots.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;	/* log has wrapped at least once */
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
232
342e5c90
MC
233struct binder_context {
234 struct binder_node *binder_context_mgr_node;
c44b1231
TK
235 struct mutex context_mgr_node_lock;
236
342e5c90 237 kuid_t binder_context_mgr_uid;
14db3181 238 const char *name;
342e5c90
MC
239};
240
ac4812c5
MC
241struct binder_device {
242 struct hlist_node hlist;
243 struct miscdevice miscdev;
244 struct binder_context context;
342e5c90
MC
245};
246
355b0502
GKH
247struct binder_work {
248 struct list_head entry;
249 enum {
250 BINDER_WORK_TRANSACTION = 1,
251 BINDER_WORK_TRANSACTION_COMPLETE,
26549d17 252 BINDER_WORK_RETURN_ERROR,
355b0502
GKH
253 BINDER_WORK_NODE,
254 BINDER_WORK_DEAD_BINDER,
255 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
256 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
257 } type;
258};
259
26549d17
TK
260struct binder_error {
261 struct binder_work work;
262 uint32_t cmd;
263};
264
355b0502
GKH
/*
 * struct binder_node - kernel-side state for one binder object.
 *
 * A node is owned by exactly one proc; once that proc dies, @proc is
 * NULL and the node sits on the global binder_dead_nodes list via
 * @dead_node instead of the proc's rb-tree.
 */
struct binder_node {
	int debug_id;			/* unique id, from binder_last_id */
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* in proc->nodes (keyed by ptr) */
		struct hlist_node dead_node;	/* in binder_dead_nodes */
	};
	struct binder_proc *proc;	/* owning proc; NULL when dead */
	struct hlist_head refs;		/* all binder_refs pointing here */
	int internal_strong_refs;	/* strong refs held via remote refs */
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* userspace pointer identifying the object */
	binder_uintptr_t cookie;	/* opaque userspace cookie */
	unsigned has_strong_ref:1;	/* userspace told of a strong ref */
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;	/* userspace told of a weak ref */
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;	/* queued async transactions */
};
288
289struct binder_ref_death {
290 struct binder_work work;
da49889d 291 binder_uintptr_t cookie;
355b0502
GKH
292};
293
/*
 * struct binder_ref - one proc's handle (descriptor) to a binder_node.
 * Every ref is indexed twice in its proc (by desc and by node) and
 * once on the node's ref list.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;	/* proc owning this ref */
	struct binder_node *node;	/* referenced node */
	uint32_t desc;			/* handle value seen by userspace */
	int strong;			/* strong user refcount */
	int weak;			/* weak user refcount */
	struct binder_ref_death *death;	/* pending death notification, or NULL */
};
310
355b0502
GKH
311enum binder_deferred_state {
312 BINDER_DEFERRED_PUT_FILES = 0x01,
313 BINDER_DEFERRED_FLUSH = 0x02,
314 BINDER_DEFERRED_RELEASE = 0x04,
315};
316
317struct binder_proc {
318 struct hlist_node proc_node;
319 struct rb_root threads;
320 struct rb_root nodes;
321 struct rb_root refs_by_desc;
322 struct rb_root refs_by_node;
323 int pid;
355b0502
GKH
324 struct task_struct *tsk;
325 struct files_struct *files;
326 struct hlist_node deferred_work_node;
327 int deferred_work;
355b0502 328
355b0502
GKH
329 struct list_head todo;
330 wait_queue_head_t wait;
331 struct binder_stats stats;
332 struct list_head delivered_death;
333 int max_threads;
334 int requested_threads;
335 int requested_threads_started;
336 int ready_threads;
337 long default_priority;
16b66554 338 struct dentry *debugfs_entry;
fdfb4a99 339 struct binder_alloc alloc;
342e5c90 340 struct binder_context *context;
355b0502
GKH
341};
342
343enum {
344 BINDER_LOOPER_STATE_REGISTERED = 0x01,
345 BINDER_LOOPER_STATE_ENTERED = 0x02,
346 BINDER_LOOPER_STATE_EXITED = 0x04,
347 BINDER_LOOPER_STATE_INVALID = 0x08,
348 BINDER_LOOPER_STATE_WAITING = 0x10,
355b0502
GKH
349};
350
351struct binder_thread {
352 struct binder_proc *proc;
353 struct rb_node rb_node;
354 int pid;
08dabcee
TK
355 int looper; /* only modified by this thread */
356 bool looper_need_return; /* can be written by other thread */
355b0502
GKH
357 struct binder_transaction *transaction_stack;
358 struct list_head todo;
26549d17
TK
359 struct binder_error return_error;
360 struct binder_error reply_error;
355b0502
GKH
361 wait_queue_head_t wait;
362 struct binder_stats stats;
363};
364
365struct binder_transaction {
366 int debug_id;
367 struct binder_work work;
368 struct binder_thread *from;
369 struct binder_transaction *from_parent;
370 struct binder_proc *to_proc;
371 struct binder_thread *to_thread;
372 struct binder_transaction *to_parent;
373 unsigned need_reply:1;
374 /* unsigned is_dead:1; */ /* not used at the moment */
375
376 struct binder_buffer *buffer;
377 unsigned int code;
378 unsigned int flags;
379 long priority;
380 long saved_priority;
4a2ebb93 381 kuid_t sender_euid;
355b0502
GKH
382};
383
384static void
385binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
386
/*
 * Allocate an unused file descriptor in @proc's files table, bounded
 * by that task's RLIMIT_NOFILE.
 *
 * Return: the new fd on success, -ESRCH when the proc no longer has a
 * files table, -EMFILE when the target task's sighand cannot be
 * locked to read the rlimit.
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	/* sighand lock is needed to read the task's rlimit safely */
	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
404
/*
 * copied from fd_install
 *
 * Install @file at descriptor @fd in the target proc's files table.
 * Silently a no-op if the proc's files table is already gone.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
414
415/*
416 * copied from sys_close
417 */
418static long task_close_fd(struct binder_proc *proc, unsigned int fd)
419{
355b0502
GKH
420 int retval;
421
483ce1d4 422 if (proc->files == NULL)
355b0502
GKH
423 return -ESRCH;
424
483ce1d4 425 retval = __close_fd(proc->files, fd);
355b0502
GKH
426 /* can't restart close syscall because file table entry was cleared */
427 if (unlikely(retval == -ERESTARTSYS ||
428 retval == -ERESTARTNOINTR ||
429 retval == -ERESTARTNOHAND ||
430 retval == -ERESTART_RESTARTBLOCK))
431 retval = -EINTR;
432
433 return retval;
355b0502
GKH
434}
435
975a1ac9
AH
/*
 * Take the single global binder lock.  @tag is only used for tracing;
 * the two trace points bracket the (possibly blocking) acquisition.
 */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}
442
/* Release the global binder lock taken by binder_lock(). */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
448
355b0502
GKH
449static void binder_set_nice(long nice)
450{
451 long min_nice;
10f62861 452
355b0502
GKH
453 if (can_nice(current, nice)) {
454 set_user_nice(current, nice);
455 return;
456 }
7aa2c016 457 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
355b0502 458 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
56b468fc
AS
459 "%d: nice value %ld not allowed use %ld instead\n",
460 current->pid, nice, min_nice);
355b0502 461 set_user_nice(current, min_nice);
8698a745 462 if (min_nice <= MAX_NICE)
355b0502 463 return;
56b468fc 464 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
355b0502
GKH
465}
466
355b0502 467static struct binder_node *binder_get_node(struct binder_proc *proc,
da49889d 468 binder_uintptr_t ptr)
355b0502
GKH
469{
470 struct rb_node *n = proc->nodes.rb_node;
471 struct binder_node *node;
472
473 while (n) {
474 node = rb_entry(n, struct binder_node, rb_node);
475
476 if (ptr < node->ptr)
477 n = n->rb_left;
478 else if (ptr > node->ptr)
479 n = n->rb_right;
480 else
481 return node;
482 }
483 return NULL;
484}
485
486static struct binder_node *binder_new_node(struct binder_proc *proc,
da49889d
AH
487 binder_uintptr_t ptr,
488 binder_uintptr_t cookie)
355b0502
GKH
489{
490 struct rb_node **p = &proc->nodes.rb_node;
491 struct rb_node *parent = NULL;
492 struct binder_node *node;
493
494 while (*p) {
495 parent = *p;
496 node = rb_entry(parent, struct binder_node, rb_node);
497
498 if (ptr < node->ptr)
499 p = &(*p)->rb_left;
500 else if (ptr > node->ptr)
501 p = &(*p)->rb_right;
502 else
503 return NULL;
504 }
505
506 node = kzalloc(sizeof(*node), GFP_KERNEL);
507 if (node == NULL)
508 return NULL;
509 binder_stats_created(BINDER_STAT_NODE);
510 rb_link_node(&node->rb_node, parent, p);
511 rb_insert_color(&node->rb_node, &proc->nodes);
656a800a 512 node->debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
513 node->proc = proc;
514 node->ptr = ptr;
515 node->cookie = cookie;
516 node->work.type = BINDER_WORK_NODE;
517 INIT_LIST_HEAD(&node->work.entry);
518 INIT_LIST_HEAD(&node->async_todo);
519 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
da49889d 520 "%d:%d node %d u%016llx c%016llx created\n",
355b0502 521 proc->pid, current->pid, node->debug_id,
da49889d 522 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
523 return node;
524}
525
/*
 * Take a strong or weak reference on @node.
 *
 * @strong:      non-zero for a strong reference, 0 for weak
 * @internal:    non-zero when the reference is taken on behalf of a
 *               remote proc's binder_ref, 0 for a local reference
 * @target_list: work list to queue the node on so the owning process
 *               is notified of the reference change; may be NULL only
 *               when no notification is needed
 *
 * Return: 0 on success, -EINVAL for an invalid reference request.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * A first internal strong ref with no target list is
			 * only legal when userspace already holds a strong
			 * ref; the context manager node is exempt.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* queue node work so the owner learns of the strong ref */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/* queue node work so the owner learns of the weak ref */
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
561
/*
 * Drop a strong or weak reference on @node (counterpart of
 * binder_inc_node()).  When no references of any kind remain and
 * userspace no longer holds one, the node is unlinked and freed.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;	/* still strongly referenced */
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;	/* still weakly referenced */
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/*
		 * Userspace still believes it holds a ref; queue work on
		 * the owning proc so it can be told to drop it.
		 */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* owning proc already exited: node was on
				 * the global dead-nodes list */
				spin_lock(&binder_dead_nodes_lock);
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
606
607
608static struct binder_ref *binder_get_ref(struct binder_proc *proc,
0a3ffab9 609 u32 desc, bool need_strong_ref)
355b0502
GKH
610{
611 struct rb_node *n = proc->refs_by_desc.rb_node;
612 struct binder_ref *ref;
613
614 while (n) {
615 ref = rb_entry(n, struct binder_ref, rb_node_desc);
616
0a3ffab9 617 if (desc < ref->desc) {
355b0502 618 n = n->rb_left;
0a3ffab9 619 } else if (desc > ref->desc) {
355b0502 620 n = n->rb_right;
0a3ffab9
AH
621 } else if (need_strong_ref && !ref->strong) {
622 binder_user_error("tried to use weak ref as strong ref\n");
623 return NULL;
624 } else {
355b0502 625 return ref;
0a3ffab9 626 }
355b0502
GKH
627 }
628 return NULL;
629}
630
/*
 * Find the ref in @proc that points at @node, creating one if none
 * exists.  A new ref is assigned the smallest unused descriptor in
 * the proc; descriptor 0 is reserved for the context manager node.
 *
 * Return: the existing or new ref, or NULL if allocation failed.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	/* refs_by_node is keyed by the node pointer itself */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;	/* already have a ref for this node */
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Choose the lowest free descriptor: walk refs_by_desc in order
	 * and stop at the first gap.
	 */
	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* insert into the desc-keyed tree as well */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();	/* desc was just chosen to be unused */
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->debug_id, new_ref->desc,
		      node->debug_id);
	return new_ref;
}
691
/*
 * Tear down @ref: unlink it from both proc trees and the node's ref
 * list, release the node references it held, discard any pending
 * death notification, and free it.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);	/* drop strong node ref */
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);	/* drop the ref's weak node ref */
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
716
717static int binder_inc_ref(struct binder_ref *ref, int strong,
718 struct list_head *target_list)
719{
720 int ret;
10f62861 721
355b0502
GKH
722 if (strong) {
723 if (ref->strong == 0) {
724 ret = binder_inc_node(ref->node, 1, 1, target_list);
725 if (ret)
726 return ret;
727 }
728 ref->strong++;
729 } else {
730 if (ref->weak == 0) {
731 ret = binder_inc_node(ref->node, 0, 1, target_list);
732 if (ret)
733 return ret;
734 }
735 ref->weak++;
736 }
737 return 0;
738}
739
740
/*
 * Drop one strong or weak user reference from @ref.  Dropping the
 * last strong reference also drops the strong node reference; once
 * both counts reach zero the ref itself is deleted.
 *
 * Return: 0 on success, -EINVAL on refcount underflow.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
771
/*
 * Unwind @t from the top of @target_thread's transaction stack and
 * sever the transaction's back-pointer to that thread.  The BUG_ONs
 * enforce that @t really is the top of @target_thread's stack.
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
782
783static void binder_free_transaction(struct binder_transaction *t)
784{
355b0502
GKH
785 if (t->buffer)
786 t->buffer->transaction = NULL;
787 kfree(t);
788 binder_stats_deleted(BINDER_STAT_TRANSACTION);
789}
790
/*
 * Deliver @error_code in place of a reply for failed transaction @t.
 * Walks up the transaction stack: if the originating thread still
 * exists, the error is queued on it and the walk stops; otherwise the
 * parent transaction is retried until the root is reached.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);	/* one-way calls expect no reply */
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				list_add_tail(
					&target_thread->reply_error.work.entry,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* at most one reply error can be pending */
				WARN(1, "Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			      t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
839
feba3900
MC
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *         size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	/*
	 * The second test guards the unsigned underflow of
	 * data_size - sizeof(*hdr) when data_size < sizeof(*hdr).
	 */
	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;	/* unknown object type */
	}
	/* same underflow guard as above, now for the full object size */
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
886
7980240b
MC
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *         described by @start and @num_valid, and if there's a valid
 *         binder_buffer_object at the offset found in index @index
 *         of the offset array, that object is returned. Otherwise,
 *         %NULL is returned.
 *         Note that the offset found in index @index itself is not
 *         verified; this function assumes that @num_valid elements
 *         from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	/* offset was validated earlier (see kerneldoc above) */
	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
922
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
 *         allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	/* climb last_obj's parent chain looking for @buffer */
	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
986
355b0502
GKH
/**
 * binder_transaction_buffer_release() - release references held by a buffer
 * @proc:      the process that owns @buffer
 * @buffer:    transaction buffer whose translated objects must be released
 * @failed_at: if non-NULL, stop the walk at this offset entry; used when a
 *             transaction failed part-way through translation so only the
 *             objects processed so far are released.  NULL releases all.
 *
 * Walks the offsets array at the end of the buffer and drops whatever
 * reference each translated object acquired: node refs for (weak) binders,
 * ref counts for (weak) handles, and installed fds (fds are only closed on
 * the failure path, since on success they belong to the target).  The
 * buffer's target_node strong ref (taken in binder_transaction()) is
 * dropped first.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	/* Drop the strong node ref pinned for the duration of the txn. */
	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array starts pointer-aligned after the data payload. */
	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			/* Bad offset: log and keep releasing the rest. */
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			/* Strong dec for BINDER, weak for WEAK_BINDER. */
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %d\n", fp->fd);
			/*
			 * Only close the fd on the failure path; on success
			 * the installed fd is owned by the receiver.
			 */
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			/* Re-run the same bounds checks as on translation. */
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
1119
a056af42
MC
1120static int binder_translate_binder(struct flat_binder_object *fp,
1121 struct binder_transaction *t,
1122 struct binder_thread *thread)
1123{
1124 struct binder_node *node;
1125 struct binder_ref *ref;
1126 struct binder_proc *proc = thread->proc;
1127 struct binder_proc *target_proc = t->to_proc;
1128
1129 node = binder_get_node(proc, fp->binder);
1130 if (!node) {
1131 node = binder_new_node(proc, fp->binder, fp->cookie);
1132 if (!node)
1133 return -ENOMEM;
1134
1135 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1136 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1137 }
1138 if (fp->cookie != node->cookie) {
1139 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1140 proc->pid, thread->pid, (u64)fp->binder,
1141 node->debug_id, (u64)fp->cookie,
1142 (u64)node->cookie);
1143 return -EINVAL;
1144 }
1145 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1146 return -EPERM;
1147
1148 ref = binder_get_ref_for_node(target_proc, node);
1149 if (!ref)
57ada2fb 1150 return -ENOMEM;
a056af42
MC
1151
1152 if (fp->hdr.type == BINDER_TYPE_BINDER)
1153 fp->hdr.type = BINDER_TYPE_HANDLE;
1154 else
1155 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1156 fp->binder = 0;
1157 fp->handle = ref->desc;
1158 fp->cookie = 0;
1159 binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1160
1161 trace_binder_transaction_node_to_ref(t, node, ref);
1162 binder_debug(BINDER_DEBUG_TRANSACTION,
1163 " node %d u%016llx -> ref %d desc %d\n",
1164 node->debug_id, (u64)node->ptr,
1165 ref->debug_id, ref->desc);
1166
1167 return 0;
1168}
1169
1170static int binder_translate_handle(struct flat_binder_object *fp,
1171 struct binder_transaction *t,
1172 struct binder_thread *thread)
1173{
1174 struct binder_ref *ref;
1175 struct binder_proc *proc = thread->proc;
1176 struct binder_proc *target_proc = t->to_proc;
1177
1178 ref = binder_get_ref(proc, fp->handle,
1179 fp->hdr.type == BINDER_TYPE_HANDLE);
1180 if (!ref) {
1181 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1182 proc->pid, thread->pid, fp->handle);
1183 return -EINVAL;
1184 }
1185 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1186 return -EPERM;
1187
1188 if (ref->node->proc == target_proc) {
1189 if (fp->hdr.type == BINDER_TYPE_HANDLE)
1190 fp->hdr.type = BINDER_TYPE_BINDER;
1191 else
1192 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1193 fp->binder = ref->node->ptr;
1194 fp->cookie = ref->node->cookie;
1195 binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1196 0, NULL);
1197 trace_binder_transaction_ref_to_node(t, ref);
1198 binder_debug(BINDER_DEBUG_TRANSACTION,
1199 " ref %d desc %d -> node %d u%016llx\n",
1200 ref->debug_id, ref->desc, ref->node->debug_id,
1201 (u64)ref->node->ptr);
1202 } else {
1203 struct binder_ref *new_ref;
1204
1205 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1206 if (!new_ref)
57ada2fb 1207 return -ENOMEM;
a056af42
MC
1208
1209 fp->binder = 0;
1210 fp->handle = new_ref->desc;
1211 fp->cookie = 0;
1212 binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1213 NULL);
1214 trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1215 binder_debug(BINDER_DEBUG_TRANSACTION,
1216 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1217 ref->debug_id, ref->desc, new_ref->debug_id,
1218 new_ref->desc, ref->node->debug_id);
1219 }
1220 return 0;
1221}
1222
1223static int binder_translate_fd(int fd,
1224 struct binder_transaction *t,
1225 struct binder_thread *thread,
1226 struct binder_transaction *in_reply_to)
1227{
1228 struct binder_proc *proc = thread->proc;
1229 struct binder_proc *target_proc = t->to_proc;
1230 int target_fd;
1231 struct file *file;
1232 int ret;
1233 bool target_allows_fd;
1234
1235 if (in_reply_to)
1236 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1237 else
1238 target_allows_fd = t->buffer->target_node->accept_fds;
1239 if (!target_allows_fd) {
1240 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1241 proc->pid, thread->pid,
1242 in_reply_to ? "reply" : "transaction",
1243 fd);
1244 ret = -EPERM;
1245 goto err_fd_not_accepted;
1246 }
1247
1248 file = fget(fd);
1249 if (!file) {
1250 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1251 proc->pid, thread->pid, fd);
1252 ret = -EBADF;
1253 goto err_fget;
1254 }
1255 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1256 if (ret < 0) {
1257 ret = -EPERM;
1258 goto err_security;
1259 }
1260
1261 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1262 if (target_fd < 0) {
1263 ret = -ENOMEM;
1264 goto err_get_unused_fd;
1265 }
1266 task_fd_install(target_proc, target_fd, file);
1267 trace_binder_transaction_fd(t, fd, target_fd);
1268 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1269 fd, target_fd);
1270
1271 return target_fd;
1272
1273err_get_unused_fd:
1274err_security:
1275 fput(file);
1276err_fget:
1277err_fd_not_accepted:
1278 return ret;
1279}
1280
def95c73
MC
/**
 * binder_translate_fd_array() - translate every fd in a BINDER_TYPE_FDA
 * @fda:         the fd-array object being translated
 * @parent:      the already-fixed-up buffer object containing the fd array
 * @t:           transaction the array belongs to
 * @thread:      sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Validates the array's bounds inside @parent, then translates each fd in
 * place via binder_translate_fd().  On partial failure, closes the fds
 * already installed in the target before returning the error.
 * Returns 0 on success, negative errno on failure.
 */
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	/* Reject num_fds large enough to overflow the size computation. */
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/* The whole array must fit inside the parent buffer object. */
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		/* Rewrite the entry in place with the receiver's fd. */
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	/* target_fd holds the negative errno from the failing translate. */
	return target_fd;
}
1338
7980240b
MC
/**
 * binder_fixup_parent() - write a child buffer's pointer into its parent
 * @t:                  the transaction being built
 * @thread:             sending thread (for error reporting)
 * @bp:                 buffer object that may carry BINDER_BUFFER_FLAG_HAS_PARENT
 * @off_start:          start of the offsets array in the new buffer
 * @num_valid:          number of offset entries validated so far
 * @last_fixup_obj:     last buffer object that had a fixup applied
 * @last_fixup_min_off: minimum allowed fixup offset within that object
 *
 * If @bp declares a parent, validate the parent pointer and the fixup
 * ordering (fixups must be applied in increasing offset order so the
 * parent chain in binder_validate_fixup() stays sound), then store @bp's
 * (already target-translated) buffer address at parent_offset inside the
 * parent.  Returns 0 on success, -EINVAL on any validation failure.
 */
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	/* No parent declared: nothing to fix up. */
	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	/* The pointer we are about to write must fit inside the parent. */
	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	/*
	 * parent->buffer was already translated to the target's address
	 * space; convert back to a kernel pointer to write through it.
	 */
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
1386
355b0502
GKH
/*
 * binder_transaction() - core send path for BC_TRANSACTION / BC_REPLY.
 * @proc/@thread: the sending process and thread.
 * @tr:           userspace transaction descriptor.
 * @reply:        nonzero when this is a BC_REPLY.
 * @extra_buffers_size: size of the extra scatter-gather buffer area.
 *
 * Resolves the target (reply stack, handle, or context manager), copies
 * the payload and offsets into a buffer allocated from the target's
 * allocator, translates every embedded object in place, then queues the
 * work on the target and a TRANSACTION_COMPLETE on the sender.  On any
 * failure the cascaded error labels unwind exactly the state acquired so
 * far; errors are reported via thread->return_error and, for replies,
 * binder_send_failed_reply().
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	/* Record the attempt in the transaction log before doing anything. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		/* A reply targets the thread that sent the original txn. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
					  proc->pid, thread->pid, in_reply_to->debug_id,
					  in_reply_to->to_proc ?
					  in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref) {
				binder_inc_node(ref->node, 1, 0, NULL);
				target_node = ref->node;
			}
			if (target_node == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		} else {
			/* Handle 0 means the context manager node. */
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			/* Strong inc while still holding the lock. */
			binder_inc_node(target_node, 1, 0, NULL);
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			/*
			 * Synchronous call: if the call stack already passes
			 * through the target process, reuse that thread to
			 * avoid deadlock on nested calls.
			 */
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
						  proc->pid, thread->pid, tmp->debug_id,
						  tmp->to_proc ? tmp->to_proc->pid : 0,
						  tmp->to_thread ?
						  tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	/* Only synchronous transactions record a sender to reply to. */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/* The buffer comes out of the *target's* allocator. */
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	/* Layout: [data][aligned offsets array][aligned sg buffers]. */
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	/* Translate each embedded object; off_min forbids overlaps. */
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* All objects translated: queue completion to sender ... */
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);

	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous: push onto the sender's call stack. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/* Async: serialize behind any in-flight async txn. */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	/* ... and the transaction work to the target. */
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

	/*
	 * Error unwinding: each label undoes exactly the state acquired
	 * before the failure point, in reverse order of acquisition.
	 */
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/* Releases objects up to offp and drops the target_node ref. */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	/* Drop the strong node ref if it wasn't consumed above. */
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		list_add_tail(&thread->return_error.work.entry,
			      &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		list_add_tail(&thread->return_error.work.entry,
			      &thread->todo);
	}
}
1910
fb07ebc3
BP
1911static int binder_thread_write(struct binder_proc *proc,
1912 struct binder_thread *thread,
da49889d
AH
1913 binder_uintptr_t binder_buffer, size_t size,
1914 binder_size_t *consumed)
355b0502
GKH
1915{
1916 uint32_t cmd;
342e5c90 1917 struct binder_context *context = proc->context;
da49889d 1918 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
1919 void __user *ptr = buffer + *consumed;
1920 void __user *end = buffer + size;
1921
26549d17 1922 while (ptr < end && thread->return_error.cmd == BR_OK) {
355b0502
GKH
1923 if (get_user(cmd, (uint32_t __user *)ptr))
1924 return -EFAULT;
1925 ptr += sizeof(uint32_t);
975a1ac9 1926 trace_binder_command(cmd);
355b0502 1927 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
0953c797
BJS
1928 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
1929 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
1930 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
355b0502
GKH
1931 }
1932 switch (cmd) {
1933 case BC_INCREFS:
1934 case BC_ACQUIRE:
1935 case BC_RELEASE:
1936 case BC_DECREFS: {
1937 uint32_t target;
c44b1231 1938 struct binder_ref *ref = NULL;
355b0502
GKH
1939 const char *debug_string;
1940
1941 if (get_user(target, (uint32_t __user *)ptr))
1942 return -EFAULT;
c44b1231 1943
355b0502 1944 ptr += sizeof(uint32_t);
c44b1231 1945 if (target == 0 &&
355b0502 1946 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
c44b1231
TK
1947 struct binder_node *ctx_mgr_node;
1948
1949 mutex_lock(&context->context_mgr_node_lock);
1950 ctx_mgr_node = context->binder_context_mgr_node;
1951 if (ctx_mgr_node) {
1952 ref = binder_get_ref_for_node(proc,
1953 ctx_mgr_node);
1954 if (ref && ref->desc != target) {
1955 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1956 proc->pid, thread->pid,
1957 ref->desc);
1958 }
355b0502 1959 }
c44b1231
TK
1960 mutex_unlock(&context->context_mgr_node_lock);
1961 }
1962 if (ref == NULL)
0a3ffab9
AH
1963 ref = binder_get_ref(proc, target,
1964 cmd == BC_ACQUIRE ||
1965 cmd == BC_RELEASE);
355b0502 1966 if (ref == NULL) {
56b468fc 1967 binder_user_error("%d:%d refcount change on invalid ref %d\n",
355b0502
GKH
1968 proc->pid, thread->pid, target);
1969 break;
1970 }
1971 switch (cmd) {
1972 case BC_INCREFS:
1973 debug_string = "IncRefs";
1974 binder_inc_ref(ref, 0, NULL);
1975 break;
1976 case BC_ACQUIRE:
1977 debug_string = "Acquire";
1978 binder_inc_ref(ref, 1, NULL);
1979 break;
1980 case BC_RELEASE:
1981 debug_string = "Release";
1982 binder_dec_ref(ref, 1);
1983 break;
1984 case BC_DECREFS:
1985 default:
1986 debug_string = "DecRefs";
1987 binder_dec_ref(ref, 0);
1988 break;
1989 }
1990 binder_debug(BINDER_DEBUG_USER_REFS,
56b468fc 1991 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
1992 proc->pid, thread->pid, debug_string, ref->debug_id,
1993 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1994 break;
1995 }
1996 case BC_INCREFS_DONE:
1997 case BC_ACQUIRE_DONE: {
da49889d
AH
1998 binder_uintptr_t node_ptr;
1999 binder_uintptr_t cookie;
355b0502
GKH
2000 struct binder_node *node;
2001
da49889d 2002 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2003 return -EFAULT;
da49889d
AH
2004 ptr += sizeof(binder_uintptr_t);
2005 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2006 return -EFAULT;
da49889d 2007 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
2008 node = binder_get_node(proc, node_ptr);
2009 if (node == NULL) {
da49889d 2010 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
2011 proc->pid, thread->pid,
2012 cmd == BC_INCREFS_DONE ?
2013 "BC_INCREFS_DONE" :
2014 "BC_ACQUIRE_DONE",
da49889d 2015 (u64)node_ptr);
355b0502
GKH
2016 break;
2017 }
2018 if (cookie != node->cookie) {
da49889d 2019 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
2020 proc->pid, thread->pid,
2021 cmd == BC_INCREFS_DONE ?
2022 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
2023 (u64)node_ptr, node->debug_id,
2024 (u64)cookie, (u64)node->cookie);
355b0502
GKH
2025 break;
2026 }
2027 if (cmd == BC_ACQUIRE_DONE) {
2028 if (node->pending_strong_ref == 0) {
56b468fc 2029 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
2030 proc->pid, thread->pid,
2031 node->debug_id);
2032 break;
2033 }
2034 node->pending_strong_ref = 0;
2035 } else {
2036 if (node->pending_weak_ref == 0) {
56b468fc 2037 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
2038 proc->pid, thread->pid,
2039 node->debug_id);
2040 break;
2041 }
2042 node->pending_weak_ref = 0;
2043 }
2044 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2045 binder_debug(BINDER_DEBUG_USER_REFS,
56b468fc 2046 "%d:%d %s node %d ls %d lw %d\n",
355b0502
GKH
2047 proc->pid, thread->pid,
2048 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2049 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2050 break;
2051 }
2052 case BC_ATTEMPT_ACQUIRE:
56b468fc 2053 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
2054 return -EINVAL;
2055 case BC_ACQUIRE_RESULT:
56b468fc 2056 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
2057 return -EINVAL;
2058
2059 case BC_FREE_BUFFER: {
da49889d 2060 binder_uintptr_t data_ptr;
355b0502
GKH
2061 struct binder_buffer *buffer;
2062
da49889d 2063 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2064 return -EFAULT;
da49889d 2065 ptr += sizeof(binder_uintptr_t);
355b0502 2066
53d311cf
TK
2067 buffer = binder_alloc_prepare_to_free(&proc->alloc,
2068 data_ptr);
355b0502 2069 if (buffer == NULL) {
da49889d
AH
2070 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2071 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2072 break;
2073 }
2074 if (!buffer->allow_user_free) {
da49889d
AH
2075 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2076 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2077 break;
2078 }
2079 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
2080 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2081 proc->pid, thread->pid, (u64)data_ptr,
2082 buffer->debug_id,
355b0502
GKH
2083 buffer->transaction ? "active" : "finished");
2084
2085 if (buffer->transaction) {
2086 buffer->transaction->buffer = NULL;
2087 buffer->transaction = NULL;
2088 }
2089 if (buffer->async_transaction && buffer->target_node) {
2090 BUG_ON(!buffer->target_node->has_async_transaction);
2091 if (list_empty(&buffer->target_node->async_todo))
2092 buffer->target_node->has_async_transaction = 0;
2093 else
2094 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2095 }
975a1ac9 2096 trace_binder_transaction_buffer_release(buffer);
355b0502 2097 binder_transaction_buffer_release(proc, buffer, NULL);
19c98724 2098 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
2099 break;
2100 }
2101
7980240b
MC
2102 case BC_TRANSACTION_SG:
2103 case BC_REPLY_SG: {
2104 struct binder_transaction_data_sg tr;
2105
2106 if (copy_from_user(&tr, ptr, sizeof(tr)))
2107 return -EFAULT;
2108 ptr += sizeof(tr);
2109 binder_transaction(proc, thread, &tr.transaction_data,
2110 cmd == BC_REPLY_SG, tr.buffers_size);
2111 break;
2112 }
355b0502
GKH
2113 case BC_TRANSACTION:
2114 case BC_REPLY: {
2115 struct binder_transaction_data tr;
2116
2117 if (copy_from_user(&tr, ptr, sizeof(tr)))
2118 return -EFAULT;
2119 ptr += sizeof(tr);
4bfac80a
MC
2120 binder_transaction(proc, thread, &tr,
2121 cmd == BC_REPLY, 0);
355b0502
GKH
2122 break;
2123 }
2124
2125 case BC_REGISTER_LOOPER:
2126 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2127 "%d:%d BC_REGISTER_LOOPER\n",
355b0502
GKH
2128 proc->pid, thread->pid);
2129 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2130 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2131 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
2132 proc->pid, thread->pid);
2133 } else if (proc->requested_threads == 0) {
2134 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2135 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
2136 proc->pid, thread->pid);
2137 } else {
2138 proc->requested_threads--;
2139 proc->requested_threads_started++;
2140 }
2141 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2142 break;
2143 case BC_ENTER_LOOPER:
2144 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2145 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
2146 proc->pid, thread->pid);
2147 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2148 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2149 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
2150 proc->pid, thread->pid);
2151 }
2152 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2153 break;
2154 case BC_EXIT_LOOPER:
2155 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2156 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
2157 proc->pid, thread->pid);
2158 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2159 break;
2160
2161 case BC_REQUEST_DEATH_NOTIFICATION:
2162 case BC_CLEAR_DEATH_NOTIFICATION: {
2163 uint32_t target;
da49889d 2164 binder_uintptr_t cookie;
355b0502
GKH
2165 struct binder_ref *ref;
2166 struct binder_ref_death *death;
2167
2168 if (get_user(target, (uint32_t __user *)ptr))
2169 return -EFAULT;
2170 ptr += sizeof(uint32_t);
da49889d 2171 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2172 return -EFAULT;
da49889d 2173 ptr += sizeof(binder_uintptr_t);
0a3ffab9 2174 ref = binder_get_ref(proc, target, false);
355b0502 2175 if (ref == NULL) {
56b468fc 2176 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
2177 proc->pid, thread->pid,
2178 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2179 "BC_REQUEST_DEATH_NOTIFICATION" :
2180 "BC_CLEAR_DEATH_NOTIFICATION",
2181 target);
2182 break;
2183 }
2184
2185 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2186 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
2187 proc->pid, thread->pid,
2188 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2189 "BC_REQUEST_DEATH_NOTIFICATION" :
2190 "BC_CLEAR_DEATH_NOTIFICATION",
da49889d 2191 (u64)cookie, ref->debug_id, ref->desc,
355b0502
GKH
2192 ref->strong, ref->weak, ref->node->debug_id);
2193
2194 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2195 if (ref->death) {
56b468fc 2196 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502
GKH
2197 proc->pid, thread->pid);
2198 break;
2199 }
2200 death = kzalloc(sizeof(*death), GFP_KERNEL);
2201 if (death == NULL) {
26549d17
TK
2202 WARN_ON(thread->return_error.cmd !=
2203 BR_OK);
2204 thread->return_error.cmd = BR_ERROR;
2205 list_add_tail(
2206 &thread->return_error.work.entry,
2207 &thread->todo);
355b0502 2208 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
56b468fc 2209 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
355b0502
GKH
2210 proc->pid, thread->pid);
2211 break;
2212 }
2213 binder_stats_created(BINDER_STAT_DEATH);
2214 INIT_LIST_HEAD(&death->work.entry);
2215 death->cookie = cookie;
2216 ref->death = death;
2217 if (ref->node->proc == NULL) {
2218 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2219 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2220 list_add_tail(&ref->death->work.entry, &thread->todo);
2221 } else {
2222 list_add_tail(&ref->death->work.entry, &proc->todo);
2223 wake_up_interruptible(&proc->wait);
2224 }
2225 }
2226 } else {
2227 if (ref->death == NULL) {
56b468fc 2228 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502
GKH
2229 proc->pid, thread->pid);
2230 break;
2231 }
2232 death = ref->death;
2233 if (death->cookie != cookie) {
da49889d 2234 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 2235 proc->pid, thread->pid,
da49889d
AH
2236 (u64)death->cookie,
2237 (u64)cookie);
355b0502
GKH
2238 break;
2239 }
2240 ref->death = NULL;
2241 if (list_empty(&death->work.entry)) {
2242 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2243 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2244 list_add_tail(&death->work.entry, &thread->todo);
2245 } else {
2246 list_add_tail(&death->work.entry, &proc->todo);
2247 wake_up_interruptible(&proc->wait);
2248 }
2249 } else {
2250 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2251 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2252 }
2253 }
2254 } break;
2255 case BC_DEAD_BINDER_DONE: {
2256 struct binder_work *w;
da49889d 2257 binder_uintptr_t cookie;
355b0502 2258 struct binder_ref_death *death = NULL;
10f62861 2259
da49889d 2260 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
2261 return -EFAULT;
2262
7a64cd88 2263 ptr += sizeof(cookie);
355b0502
GKH
2264 list_for_each_entry(w, &proc->delivered_death, entry) {
2265 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
10f62861 2266
355b0502
GKH
2267 if (tmp_death->cookie == cookie) {
2268 death = tmp_death;
2269 break;
2270 }
2271 }
2272 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
2273 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2274 proc->pid, thread->pid, (u64)cookie,
2275 death);
355b0502 2276 if (death == NULL) {
da49889d
AH
2277 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2278 proc->pid, thread->pid, (u64)cookie);
355b0502
GKH
2279 break;
2280 }
2281
2282 list_del_init(&death->work.entry);
2283 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2284 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2285 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2286 list_add_tail(&death->work.entry, &thread->todo);
2287 } else {
2288 list_add_tail(&death->work.entry, &proc->todo);
2289 wake_up_interruptible(&proc->wait);
2290 }
2291 }
2292 } break;
2293
2294 default:
56b468fc 2295 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
2296 proc->pid, thread->pid, cmd);
2297 return -EINVAL;
2298 }
2299 *consumed = ptr - buffer;
2300 }
2301 return 0;
2302}
2303
fb07ebc3
BP
2304static void binder_stat_br(struct binder_proc *proc,
2305 struct binder_thread *thread, uint32_t cmd)
355b0502 2306{
975a1ac9 2307 trace_binder_return(cmd);
355b0502 2308 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
0953c797
BJS
2309 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2310 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2311 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
2312 }
2313}
2314
2315static int binder_has_proc_work(struct binder_proc *proc,
2316 struct binder_thread *thread)
2317{
08dabcee 2318 return !list_empty(&proc->todo) || thread->looper_need_return;
355b0502
GKH
2319}
2320
2321static int binder_has_thread_work(struct binder_thread *thread)
2322{
26549d17 2323 return !list_empty(&thread->todo) || thread->looper_need_return;
355b0502
GKH
2324}
2325
26b47d8a
TK
2326static int binder_put_node_cmd(struct binder_proc *proc,
2327 struct binder_thread *thread,
2328 void __user **ptrp,
2329 binder_uintptr_t node_ptr,
2330 binder_uintptr_t node_cookie,
2331 int node_debug_id,
2332 uint32_t cmd, const char *cmd_name)
2333{
2334 void __user *ptr = *ptrp;
2335
2336 if (put_user(cmd, (uint32_t __user *)ptr))
2337 return -EFAULT;
2338 ptr += sizeof(uint32_t);
2339
2340 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
2341 return -EFAULT;
2342 ptr += sizeof(binder_uintptr_t);
2343
2344 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
2345 return -EFAULT;
2346 ptr += sizeof(binder_uintptr_t);
2347
2348 binder_stat_br(proc, thread, cmd);
2349 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
2350 proc->pid, thread->pid, cmd_name, node_debug_id,
2351 (u64)node_ptr, (u64)node_cookie);
2352
2353 *ptrp = ptr;
2354 return 0;
2355}
2356
355b0502
GKH
2357static int binder_thread_read(struct binder_proc *proc,
2358 struct binder_thread *thread,
da49889d
AH
2359 binder_uintptr_t binder_buffer, size_t size,
2360 binder_size_t *consumed, int non_block)
355b0502 2361{
da49889d 2362 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
2363 void __user *ptr = buffer + *consumed;
2364 void __user *end = buffer + size;
2365
2366 int ret = 0;
2367 int wait_for_proc_work;
2368
2369 if (*consumed == 0) {
2370 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2371 return -EFAULT;
2372 ptr += sizeof(uint32_t);
2373 }
2374
2375retry:
2376 wait_for_proc_work = thread->transaction_stack == NULL &&
2377 list_empty(&thread->todo);
2378
355b0502
GKH
2379 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2380 if (wait_for_proc_work)
2381 proc->ready_threads++;
975a1ac9
AH
2382
2383 binder_unlock(__func__);
2384
2385 trace_binder_wait_for_work(wait_for_proc_work,
2386 !!thread->transaction_stack,
2387 !list_empty(&thread->todo));
355b0502
GKH
2388 if (wait_for_proc_work) {
2389 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2390 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 2391 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
2392 proc->pid, thread->pid, thread->looper);
2393 wait_event_interruptible(binder_user_error_wait,
2394 binder_stop_on_user_error < 2);
2395 }
2396 binder_set_nice(proc->default_priority);
2397 if (non_block) {
2398 if (!binder_has_proc_work(proc, thread))
2399 ret = -EAGAIN;
2400 } else
e2610b26 2401 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
355b0502
GKH
2402 } else {
2403 if (non_block) {
2404 if (!binder_has_thread_work(thread))
2405 ret = -EAGAIN;
2406 } else
e2610b26 2407 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
355b0502 2408 }
975a1ac9
AH
2409
2410 binder_lock(__func__);
2411
355b0502
GKH
2412 if (wait_for_proc_work)
2413 proc->ready_threads--;
2414 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2415
2416 if (ret)
2417 return ret;
2418
2419 while (1) {
2420 uint32_t cmd;
2421 struct binder_transaction_data tr;
2422 struct binder_work *w;
2423 struct binder_transaction *t = NULL;
2424
395262a9
DV
2425 if (!list_empty(&thread->todo)) {
2426 w = list_first_entry(&thread->todo, struct binder_work,
2427 entry);
2428 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2429 w = list_first_entry(&proc->todo, struct binder_work,
2430 entry);
2431 } else {
2432 /* no data added */
08dabcee 2433 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
2434 goto retry;
2435 break;
2436 }
2437
2438 if (end - ptr < sizeof(tr) + 4)
2439 break;
2440
2441 switch (w->type) {
2442 case BINDER_WORK_TRANSACTION: {
2443 t = container_of(w, struct binder_transaction, work);
2444 } break;
26549d17
TK
2445 case BINDER_WORK_RETURN_ERROR: {
2446 struct binder_error *e = container_of(
2447 w, struct binder_error, work);
2448
2449 WARN_ON(e->cmd == BR_OK);
2450 if (put_user(e->cmd, (uint32_t __user *)ptr))
2451 return -EFAULT;
2452 e->cmd = BR_OK;
2453 ptr += sizeof(uint32_t);
2454
2455 binder_stat_br(proc, thread, cmd);
2456 list_del(&w->entry);
2457 } break;
355b0502
GKH
2458 case BINDER_WORK_TRANSACTION_COMPLETE: {
2459 cmd = BR_TRANSACTION_COMPLETE;
2460 if (put_user(cmd, (uint32_t __user *)ptr))
2461 return -EFAULT;
2462 ptr += sizeof(uint32_t);
2463
2464 binder_stat_br(proc, thread, cmd);
2465 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 2466 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502
GKH
2467 proc->pid, thread->pid);
2468
2469 list_del(&w->entry);
2470 kfree(w);
2471 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2472 } break;
2473 case BINDER_WORK_NODE: {
2474 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
2475 int strong, weak;
2476 binder_uintptr_t node_ptr = node->ptr;
2477 binder_uintptr_t node_cookie = node->cookie;
2478 int node_debug_id = node->debug_id;
2479 int has_weak_ref;
2480 int has_strong_ref;
2481 void __user *orig_ptr = ptr;
2482
2483 BUG_ON(proc != node->proc);
2484 strong = node->internal_strong_refs ||
2485 node->local_strong_refs;
2486 weak = !hlist_empty(&node->refs) ||
2487 node->local_weak_refs || strong;
2488 has_strong_ref = node->has_strong_ref;
2489 has_weak_ref = node->has_weak_ref;
2490
2491 if (weak && !has_weak_ref) {
355b0502
GKH
2492 node->has_weak_ref = 1;
2493 node->pending_weak_ref = 1;
2494 node->local_weak_refs++;
26b47d8a
TK
2495 }
2496 if (strong && !has_strong_ref) {
355b0502
GKH
2497 node->has_strong_ref = 1;
2498 node->pending_strong_ref = 1;
2499 node->local_strong_refs++;
26b47d8a
TK
2500 }
2501 if (!strong && has_strong_ref)
355b0502 2502 node->has_strong_ref = 0;
26b47d8a 2503 if (!weak && has_weak_ref)
355b0502 2504 node->has_weak_ref = 0;
26b47d8a
TK
2505 list_del(&w->entry);
2506
2507 if (!weak && !strong) {
2508 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2509 "%d:%d node %d u%016llx c%016llx deleted\n",
2510 proc->pid, thread->pid,
2511 node_debug_id,
2512 (u64)node_ptr,
2513 (u64)node_cookie);
2514 rb_erase(&node->rb_node, &proc->nodes);
2515 kfree(node);
2516 binder_stats_deleted(BINDER_STAT_NODE);
355b0502 2517 }
26b47d8a
TK
2518 if (weak && !has_weak_ref)
2519 ret = binder_put_node_cmd(
2520 proc, thread, &ptr, node_ptr,
2521 node_cookie, node_debug_id,
2522 BR_INCREFS, "BR_INCREFS");
2523 if (!ret && strong && !has_strong_ref)
2524 ret = binder_put_node_cmd(
2525 proc, thread, &ptr, node_ptr,
2526 node_cookie, node_debug_id,
2527 BR_ACQUIRE, "BR_ACQUIRE");
2528 if (!ret && !strong && has_strong_ref)
2529 ret = binder_put_node_cmd(
2530 proc, thread, &ptr, node_ptr,
2531 node_cookie, node_debug_id,
2532 BR_RELEASE, "BR_RELEASE");
2533 if (!ret && !weak && has_weak_ref)
2534 ret = binder_put_node_cmd(
2535 proc, thread, &ptr, node_ptr,
2536 node_cookie, node_debug_id,
2537 BR_DECREFS, "BR_DECREFS");
2538 if (orig_ptr == ptr)
2539 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2540 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2541 proc->pid, thread->pid,
2542 node_debug_id,
2543 (u64)node_ptr,
2544 (u64)node_cookie);
2545 if (ret)
2546 return ret;
355b0502
GKH
2547 } break;
2548 case BINDER_WORK_DEAD_BINDER:
2549 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2550 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2551 struct binder_ref_death *death;
2552 uint32_t cmd;
2553
2554 death = container_of(w, struct binder_ref_death, work);
2555 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2556 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2557 else
2558 cmd = BR_DEAD_BINDER;
2559 if (put_user(cmd, (uint32_t __user *)ptr))
2560 return -EFAULT;
2561 ptr += sizeof(uint32_t);
da49889d
AH
2562 if (put_user(death->cookie,
2563 (binder_uintptr_t __user *)ptr))
355b0502 2564 return -EFAULT;
da49889d 2565 ptr += sizeof(binder_uintptr_t);
89334ab4 2566 binder_stat_br(proc, thread, cmd);
355b0502 2567 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2568 "%d:%d %s %016llx\n",
355b0502
GKH
2569 proc->pid, thread->pid,
2570 cmd == BR_DEAD_BINDER ?
2571 "BR_DEAD_BINDER" :
2572 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
da49889d 2573 (u64)death->cookie);
355b0502
GKH
2574
2575 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2576 list_del(&w->entry);
2577 kfree(death);
2578 binder_stats_deleted(BINDER_STAT_DEATH);
2579 } else
2580 list_move(&w->entry, &proc->delivered_death);
2581 if (cmd == BR_DEAD_BINDER)
2582 goto done; /* DEAD_BINDER notifications can cause transactions */
2583 } break;
2584 }
2585
2586 if (!t)
2587 continue;
2588
2589 BUG_ON(t->buffer == NULL);
2590 if (t->buffer->target_node) {
2591 struct binder_node *target_node = t->buffer->target_node;
10f62861 2592
355b0502
GKH
2593 tr.target.ptr = target_node->ptr;
2594 tr.cookie = target_node->cookie;
2595 t->saved_priority = task_nice(current);
2596 if (t->priority < target_node->min_priority &&
2597 !(t->flags & TF_ONE_WAY))
2598 binder_set_nice(t->priority);
2599 else if (!(t->flags & TF_ONE_WAY) ||
2600 t->saved_priority > target_node->min_priority)
2601 binder_set_nice(target_node->min_priority);
2602 cmd = BR_TRANSACTION;
2603 } else {
da49889d
AH
2604 tr.target.ptr = 0;
2605 tr.cookie = 0;
355b0502
GKH
2606 cmd = BR_REPLY;
2607 }
2608 tr.code = t->code;
2609 tr.flags = t->flags;
4a2ebb93 2610 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502
GKH
2611
2612 if (t->from) {
2613 struct task_struct *sender = t->from->proc->tsk;
10f62861 2614
355b0502 2615 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 2616 task_active_pid_ns(current));
355b0502
GKH
2617 } else {
2618 tr.sender_pid = 0;
2619 }
2620
2621 tr.data_size = t->buffer->data_size;
2622 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
2623 tr.data.ptr.buffer = (binder_uintptr_t)
2624 ((uintptr_t)t->buffer->data +
2625 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
2626 tr.data.ptr.offsets = tr.data.ptr.buffer +
2627 ALIGN(t->buffer->data_size,
2628 sizeof(void *));
2629
2630 if (put_user(cmd, (uint32_t __user *)ptr))
2631 return -EFAULT;
2632 ptr += sizeof(uint32_t);
2633 if (copy_to_user(ptr, &tr, sizeof(tr)))
2634 return -EFAULT;
2635 ptr += sizeof(tr);
2636
975a1ac9 2637 trace_binder_transaction_received(t);
355b0502
GKH
2638 binder_stat_br(proc, thread, cmd);
2639 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 2640 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
2641 proc->pid, thread->pid,
2642 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2643 "BR_REPLY",
2644 t->debug_id, t->from ? t->from->proc->pid : 0,
2645 t->from ? t->from->pid : 0, cmd,
2646 t->buffer->data_size, t->buffer->offsets_size,
da49889d 2647 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502
GKH
2648
2649 list_del(&t->work.entry);
2650 t->buffer->allow_user_free = 1;
2651 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2652 t->to_parent = thread->transaction_stack;
2653 t->to_thread = thread;
2654 thread->transaction_stack = t;
2655 } else {
b6d282ce 2656 binder_free_transaction(t);
355b0502
GKH
2657 }
2658 break;
2659 }
2660
2661done:
2662
2663 *consumed = ptr - buffer;
2664 if (proc->requested_threads + proc->ready_threads == 0 &&
2665 proc->requested_threads_started < proc->max_threads &&
2666 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2667 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
2668 /*spawn a new thread if we leave this out */) {
2669 proc->requested_threads++;
2670 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2671 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
2672 proc->pid, thread->pid);
2673 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2674 return -EFAULT;
89334ab4 2675 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
355b0502
GKH
2676 }
2677 return 0;
2678}
2679
2680static void binder_release_work(struct list_head *list)
2681{
2682 struct binder_work *w;
10f62861 2683
355b0502
GKH
2684 while (!list_empty(list)) {
2685 w = list_first_entry(list, struct binder_work, entry);
2686 list_del_init(&w->entry);
2687 switch (w->type) {
2688 case BINDER_WORK_TRANSACTION: {
2689 struct binder_transaction *t;
2690
2691 t = container_of(w, struct binder_transaction, work);
675d66b0
AH
2692 if (t->buffer->target_node &&
2693 !(t->flags & TF_ONE_WAY)) {
355b0502 2694 binder_send_failed_reply(t, BR_DEAD_REPLY);
675d66b0
AH
2695 } else {
2696 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 2697 "undelivered transaction %d\n",
675d66b0 2698 t->debug_id);
b6d282ce 2699 binder_free_transaction(t);
675d66b0 2700 }
355b0502 2701 } break;
26549d17
TK
2702 case BINDER_WORK_RETURN_ERROR: {
2703 struct binder_error *e = container_of(
2704 w, struct binder_error, work);
2705
2706 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2707 "undelivered TRANSACTION_ERROR: %u\n",
2708 e->cmd);
2709 } break;
355b0502 2710 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 2711 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 2712 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
2713 kfree(w);
2714 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2715 } break;
675d66b0
AH
2716 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2717 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2718 struct binder_ref_death *death;
2719
2720 death = container_of(w, struct binder_ref_death, work);
2721 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
2722 "undelivered death notification, %016llx\n",
2723 (u64)death->cookie);
675d66b0
AH
2724 kfree(death);
2725 binder_stats_deleted(BINDER_STAT_DEATH);
2726 } break;
355b0502 2727 default:
56b468fc 2728 pr_err("unexpected work type, %d, not freed\n",
675d66b0 2729 w->type);
355b0502
GKH
2730 break;
2731 }
2732 }
2733
2734}
2735
2736static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2737{
2738 struct binder_thread *thread = NULL;
2739 struct rb_node *parent = NULL;
2740 struct rb_node **p = &proc->threads.rb_node;
2741
2742 while (*p) {
2743 parent = *p;
2744 thread = rb_entry(parent, struct binder_thread, rb_node);
2745
2746 if (current->pid < thread->pid)
2747 p = &(*p)->rb_left;
2748 else if (current->pid > thread->pid)
2749 p = &(*p)->rb_right;
2750 else
2751 break;
2752 }
2753 if (*p == NULL) {
2754 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2755 if (thread == NULL)
2756 return NULL;
2757 binder_stats_created(BINDER_STAT_THREAD);
2758 thread->proc = proc;
2759 thread->pid = current->pid;
2760 init_waitqueue_head(&thread->wait);
2761 INIT_LIST_HEAD(&thread->todo);
2762 rb_link_node(&thread->rb_node, parent, p);
2763 rb_insert_color(&thread->rb_node, &proc->threads);
08dabcee 2764 thread->looper_need_return = true;
26549d17
TK
2765 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
2766 thread->return_error.cmd = BR_OK;
2767 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
2768 thread->reply_error.cmd = BR_OK;
355b0502
GKH
2769 }
2770 return thread;
2771}
2772
2773static int binder_free_thread(struct binder_proc *proc,
2774 struct binder_thread *thread)
2775{
2776 struct binder_transaction *t;
2777 struct binder_transaction *send_reply = NULL;
2778 int active_transactions = 0;
2779
2780 rb_erase(&thread->rb_node, &proc->threads);
2781 t = thread->transaction_stack;
2782 if (t && t->to_thread == thread)
2783 send_reply = t;
2784 while (t) {
2785 active_transactions++;
2786 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
2787 "release %d:%d transaction %d %s, still active\n",
2788 proc->pid, thread->pid,
355b0502
GKH
2789 t->debug_id,
2790 (t->to_thread == thread) ? "in" : "out");
2791
2792 if (t->to_thread == thread) {
2793 t->to_proc = NULL;
2794 t->to_thread = NULL;
2795 if (t->buffer) {
2796 t->buffer->transaction = NULL;
2797 t->buffer = NULL;
2798 }
2799 t = t->to_parent;
2800 } else if (t->from == thread) {
2801 t->from = NULL;
2802 t = t->from_parent;
2803 } else
2804 BUG();
2805 }
2806 if (send_reply)
2807 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2808 binder_release_work(&thread->todo);
2809 kfree(thread);
2810 binder_stats_deleted(BINDER_STAT_THREAD);
2811 return active_transactions;
2812}
2813
2814static unsigned int binder_poll(struct file *filp,
2815 struct poll_table_struct *wait)
2816{
2817 struct binder_proc *proc = filp->private_data;
2818 struct binder_thread *thread = NULL;
2819 int wait_for_proc_work;
2820
975a1ac9
AH
2821 binder_lock(__func__);
2822
355b0502
GKH
2823 thread = binder_get_thread(proc);
2824
2825 wait_for_proc_work = thread->transaction_stack == NULL &&
26549d17 2826 list_empty(&thread->todo);
975a1ac9
AH
2827
2828 binder_unlock(__func__);
355b0502
GKH
2829
2830 if (wait_for_proc_work) {
2831 if (binder_has_proc_work(proc, thread))
2832 return POLLIN;
2833 poll_wait(filp, &proc->wait, wait);
2834 if (binder_has_proc_work(proc, thread))
2835 return POLLIN;
2836 } else {
2837 if (binder_has_thread_work(thread))
2838 return POLLIN;
2839 poll_wait(filp, &thread->wait, wait);
2840 if (binder_has_thread_work(thread))
2841 return POLLIN;
2842 }
2843 return 0;
2844}
2845
78260ac6
TR
2846static int binder_ioctl_write_read(struct file *filp,
2847 unsigned int cmd, unsigned long arg,
2848 struct binder_thread *thread)
2849{
2850 int ret = 0;
2851 struct binder_proc *proc = filp->private_data;
2852 unsigned int size = _IOC_SIZE(cmd);
2853 void __user *ubuf = (void __user *)arg;
2854 struct binder_write_read bwr;
2855
2856 if (size != sizeof(struct binder_write_read)) {
2857 ret = -EINVAL;
2858 goto out;
2859 }
2860 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2861 ret = -EFAULT;
2862 goto out;
2863 }
2864 binder_debug(BINDER_DEBUG_READ_WRITE,
2865 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2866 proc->pid, thread->pid,
2867 (u64)bwr.write_size, (u64)bwr.write_buffer,
2868 (u64)bwr.read_size, (u64)bwr.read_buffer);
2869
2870 if (bwr.write_size > 0) {
2871 ret = binder_thread_write(proc, thread,
2872 bwr.write_buffer,
2873 bwr.write_size,
2874 &bwr.write_consumed);
2875 trace_binder_write_done(ret);
2876 if (ret < 0) {
2877 bwr.read_consumed = 0;
2878 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2879 ret = -EFAULT;
2880 goto out;
2881 }
2882 }
2883 if (bwr.read_size > 0) {
2884 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2885 bwr.read_size,
2886 &bwr.read_consumed,
2887 filp->f_flags & O_NONBLOCK);
2888 trace_binder_read_done(ret);
2889 if (!list_empty(&proc->todo))
2890 wake_up_interruptible(&proc->wait);
2891 if (ret < 0) {
2892 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2893 ret = -EFAULT;
2894 goto out;
2895 }
2896 }
2897 binder_debug(BINDER_DEBUG_READ_WRITE,
2898 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2899 proc->pid, thread->pid,
2900 (u64)bwr.write_consumed, (u64)bwr.write_size,
2901 (u64)bwr.read_consumed, (u64)bwr.read_size);
2902 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2903 ret = -EFAULT;
2904 goto out;
2905 }
2906out:
2907 return ret;
2908}
2909
2910static int binder_ioctl_set_ctx_mgr(struct file *filp)
2911{
2912 int ret = 0;
2913 struct binder_proc *proc = filp->private_data;
342e5c90 2914 struct binder_context *context = proc->context;
c44b1231 2915 struct binder_node *new_node;
78260ac6
TR
2916 kuid_t curr_euid = current_euid();
2917
c44b1231 2918 mutex_lock(&context->context_mgr_node_lock);
342e5c90 2919 if (context->binder_context_mgr_node) {
78260ac6
TR
2920 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2921 ret = -EBUSY;
2922 goto out;
2923 }
79af7307
SS
2924 ret = security_binder_set_context_mgr(proc->tsk);
2925 if (ret < 0)
2926 goto out;
342e5c90
MC
2927 if (uid_valid(context->binder_context_mgr_uid)) {
2928 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
2929 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2930 from_kuid(&init_user_ns, curr_euid),
2931 from_kuid(&init_user_ns,
342e5c90 2932 context->binder_context_mgr_uid));
78260ac6
TR
2933 ret = -EPERM;
2934 goto out;
2935 }
2936 } else {
342e5c90 2937 context->binder_context_mgr_uid = curr_euid;
78260ac6 2938 }
c44b1231
TK
2939 new_node = binder_new_node(proc, 0, 0);
2940 if (!new_node) {
78260ac6
TR
2941 ret = -ENOMEM;
2942 goto out;
2943 }
c44b1231
TK
2944 new_node->local_weak_refs++;
2945 new_node->local_strong_refs++;
2946 new_node->has_strong_ref = 1;
2947 new_node->has_weak_ref = 1;
2948 context->binder_context_mgr_node = new_node;
78260ac6 2949out:
c44b1231 2950 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
2951 return ret;
2952}
2953
355b0502
GKH
2954static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2955{
2956 int ret;
2957 struct binder_proc *proc = filp->private_data;
2958 struct binder_thread *thread;
2959 unsigned int size = _IOC_SIZE(cmd);
2960 void __user *ubuf = (void __user *)arg;
2961
78260ac6
TR
2962 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2963 proc->pid, current->pid, cmd, arg);*/
355b0502 2964
975a1ac9
AH
2965 trace_binder_ioctl(cmd, arg);
2966
355b0502
GKH
2967 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2968 if (ret)
975a1ac9 2969 goto err_unlocked;
355b0502 2970
975a1ac9 2971 binder_lock(__func__);
355b0502
GKH
2972 thread = binder_get_thread(proc);
2973 if (thread == NULL) {
2974 ret = -ENOMEM;
2975 goto err;
2976 }
2977
2978 switch (cmd) {
78260ac6
TR
2979 case BINDER_WRITE_READ:
2980 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2981 if (ret)
355b0502 2982 goto err;
355b0502 2983 break;
355b0502
GKH
2984 case BINDER_SET_MAX_THREADS:
2985 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2986 ret = -EINVAL;
2987 goto err;
2988 }
2989 break;
2990 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
2991 ret = binder_ioctl_set_ctx_mgr(filp);
2992 if (ret)
355b0502 2993 goto err;
355b0502
GKH
2994 break;
2995 case BINDER_THREAD_EXIT:
56b468fc 2996 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502
GKH
2997 proc->pid, thread->pid);
2998 binder_free_thread(proc, thread);
2999 thread = NULL;
3000 break;
36c89c0a
MM
3001 case BINDER_VERSION: {
3002 struct binder_version __user *ver = ubuf;
3003
355b0502
GKH
3004 if (size != sizeof(struct binder_version)) {
3005 ret = -EINVAL;
3006 goto err;
3007 }
36c89c0a
MM
3008 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3009 &ver->protocol_version)) {
355b0502
GKH
3010 ret = -EINVAL;
3011 goto err;
3012 }
3013 break;
36c89c0a 3014 }
355b0502
GKH
3015 default:
3016 ret = -EINVAL;
3017 goto err;
3018 }
3019 ret = 0;
3020err:
3021 if (thread)
08dabcee 3022 thread->looper_need_return = false;
975a1ac9 3023 binder_unlock(__func__);
355b0502
GKH
3024 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3025 if (ret && ret != -ERESTARTSYS)
56b468fc 3026 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
3027err_unlocked:
3028 trace_binder_ioctl_done(ret);
355b0502
GKH
3029 return ret;
3030}
3031
3032static void binder_vma_open(struct vm_area_struct *vma)
3033{
3034 struct binder_proc *proc = vma->vm_private_data;
10f62861 3035
355b0502 3036 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3037 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3038 proc->pid, vma->vm_start, vma->vm_end,
3039 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3040 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
3041}
3042
3043static void binder_vma_close(struct vm_area_struct *vma)
3044{
3045 struct binder_proc *proc = vma->vm_private_data;
10f62861 3046
355b0502 3047 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3048 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3049 proc->pid, vma->vm_start, vma->vm_end,
3050 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3051 (unsigned long)pgprot_val(vma->vm_page_prot));
19c98724 3052 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
3053 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3054}
3055
11bac800 3056static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
3057{
3058 return VM_FAULT_SIGBUS;
3059}
3060
7cbea8dc 3061static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
3062 .open = binder_vma_open,
3063 .close = binder_vma_close,
ddac7d5f 3064 .fault = binder_vm_fault,
355b0502
GKH
3065};
3066
19c98724
TK
3067static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3068{
3069 int ret;
3070 struct binder_proc *proc = filp->private_data;
3071 const char *failure_string;
3072
3073 if (proc->tsk != current->group_leader)
3074 return -EINVAL;
3075
3076 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3077 vma->vm_end = vma->vm_start + SZ_4M;
3078
3079 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3080 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3081 __func__, proc->pid, vma->vm_start, vma->vm_end,
3082 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3083 (unsigned long)pgprot_val(vma->vm_page_prot));
3084
3085 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3086 ret = -EPERM;
3087 failure_string = "bad vm_flags";
3088 goto err_bad_arg;
3089 }
3090 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3091 vma->vm_ops = &binder_vm_ops;
3092 vma->vm_private_data = proc;
3093
3094 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3095 if (ret)
3096 return ret;
3097 proc->files = get_files_struct(current);
3098 return 0;
3099
355b0502 3100err_bad_arg:
258767fe 3101 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
355b0502
GKH
3102 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3103 return ret;
3104}
3105
3106static int binder_open(struct inode *nodp, struct file *filp)
3107{
3108 struct binder_proc *proc;
ac4812c5 3109 struct binder_device *binder_dev;
355b0502
GKH
3110
3111 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3112 current->group_leader->pid, current->pid);
3113
3114 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3115 if (proc == NULL)
3116 return -ENOMEM;
c4ea41ba
TK
3117 get_task_struct(current->group_leader);
3118 proc->tsk = current->group_leader;
355b0502
GKH
3119 INIT_LIST_HEAD(&proc->todo);
3120 init_waitqueue_head(&proc->wait);
3121 proc->default_priority = task_nice(current);
ac4812c5
MC
3122 binder_dev = container_of(filp->private_data, struct binder_device,
3123 miscdev);
3124 proc->context = &binder_dev->context;
19c98724 3125 binder_alloc_init(&proc->alloc);
975a1ac9
AH
3126
3127 binder_lock(__func__);
3128
355b0502 3129 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
3130 proc->pid = current->group_leader->pid;
3131 INIT_LIST_HEAD(&proc->delivered_death);
3132 filp->private_data = proc;
975a1ac9
AH
3133
3134 binder_unlock(__func__);
355b0502 3135
c44b1231
TK
3136 mutex_lock(&binder_procs_lock);
3137 hlist_add_head(&proc->proc_node, &binder_procs);
3138 mutex_unlock(&binder_procs_lock);
3139
16b66554 3140 if (binder_debugfs_dir_entry_proc) {
355b0502 3141 char strbuf[11];
10f62861 3142
355b0502 3143 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
14db3181
MC
3144 /*
3145 * proc debug entries are shared between contexts, so
3146 * this will fail if the process tries to open the driver
3147 * again with a different context. The priting code will
3148 * anyway print all contexts that a given PID has, so this
3149 * is not a problem.
3150 */
16b66554 3151 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
14db3181
MC
3152 binder_debugfs_dir_entry_proc,
3153 (void *)(unsigned long)proc->pid,
3154 &binder_proc_fops);
355b0502
GKH
3155 }
3156
3157 return 0;
3158}
3159
3160static int binder_flush(struct file *filp, fl_owner_t id)
3161{
3162 struct binder_proc *proc = filp->private_data;
3163
3164 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3165
3166 return 0;
3167}
3168
3169static void binder_deferred_flush(struct binder_proc *proc)
3170{
3171 struct rb_node *n;
3172 int wake_count = 0;
10f62861 3173
355b0502
GKH
3174 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3175 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 3176
08dabcee 3177 thread->looper_need_return = true;
355b0502
GKH
3178 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3179 wake_up_interruptible(&thread->wait);
3180 wake_count++;
3181 }
3182 }
3183 wake_up_interruptible_all(&proc->wait);
3184
3185 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3186 "binder_flush: %d woke %d threads\n", proc->pid,
3187 wake_count);
3188}
3189
3190static int binder_release(struct inode *nodp, struct file *filp)
3191{
3192 struct binder_proc *proc = filp->private_data;
10f62861 3193
16b66554 3194 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
3195 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3196
3197 return 0;
3198}
3199
008fa749
ME
3200static int binder_node_release(struct binder_node *node, int refs)
3201{
3202 struct binder_ref *ref;
3203 int death = 0;
3204
3205 list_del_init(&node->work.entry);
3206 binder_release_work(&node->async_todo);
3207
3208 if (hlist_empty(&node->refs)) {
3209 kfree(node);
3210 binder_stats_deleted(BINDER_STAT_NODE);
3211
3212 return refs;
3213 }
3214
3215 node->proc = NULL;
3216 node->local_strong_refs = 0;
3217 node->local_weak_refs = 0;
c44b1231
TK
3218
3219 spin_lock(&binder_dead_nodes_lock);
008fa749 3220 hlist_add_head(&node->dead_node, &binder_dead_nodes);
c44b1231 3221 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
3222
3223 hlist_for_each_entry(ref, &node->refs, node_entry) {
3224 refs++;
3225
3226 if (!ref->death)
e194fd8a 3227 continue;
008fa749
ME
3228
3229 death++;
3230
3231 if (list_empty(&ref->death->work.entry)) {
3232 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3233 list_add_tail(&ref->death->work.entry,
3234 &ref->proc->todo);
3235 wake_up_interruptible(&ref->proc->wait);
3236 } else
3237 BUG();
3238 }
3239
008fa749
ME
3240 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3241 "node %d now dead, refs %d, death %d\n",
3242 node->debug_id, refs, death);
3243
3244 return refs;
3245}
3246
355b0502
GKH
3247static void binder_deferred_release(struct binder_proc *proc)
3248{
342e5c90 3249 struct binder_context *context = proc->context;
355b0502 3250 struct rb_node *n;
19c98724 3251 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 3252
355b0502
GKH
3253 BUG_ON(proc->files);
3254
c44b1231 3255 mutex_lock(&binder_procs_lock);
355b0502 3256 hlist_del(&proc->proc_node);
c44b1231 3257 mutex_unlock(&binder_procs_lock);
53413e7d 3258
c44b1231 3259 mutex_lock(&context->context_mgr_node_lock);
342e5c90
MC
3260 if (context->binder_context_mgr_node &&
3261 context->binder_context_mgr_node->proc == proc) {
355b0502 3262 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
3263 "%s: %d context_mgr_node gone\n",
3264 __func__, proc->pid);
342e5c90 3265 context->binder_context_mgr_node = NULL;
355b0502 3266 }
c44b1231 3267 mutex_unlock(&context->context_mgr_node_lock);
355b0502
GKH
3268
3269 threads = 0;
3270 active_transactions = 0;
3271 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
3272 struct binder_thread *thread;
3273
3274 thread = rb_entry(n, struct binder_thread, rb_node);
355b0502
GKH
3275 threads++;
3276 active_transactions += binder_free_thread(proc, thread);
3277 }
53413e7d 3278
355b0502
GKH
3279 nodes = 0;
3280 incoming_refs = 0;
3281 while ((n = rb_first(&proc->nodes))) {
53413e7d 3282 struct binder_node *node;
355b0502 3283
53413e7d 3284 node = rb_entry(n, struct binder_node, rb_node);
355b0502
GKH
3285 nodes++;
3286 rb_erase(&node->rb_node, &proc->nodes);
008fa749 3287 incoming_refs = binder_node_release(node, incoming_refs);
355b0502 3288 }
53413e7d 3289
355b0502
GKH
3290 outgoing_refs = 0;
3291 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
3292 struct binder_ref *ref;
3293
3294 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502
GKH
3295 outgoing_refs++;
3296 binder_delete_ref(ref);
3297 }
53413e7d 3298
355b0502 3299 binder_release_work(&proc->todo);
675d66b0 3300 binder_release_work(&proc->delivered_death);
355b0502 3301
19c98724 3302 binder_alloc_deferred_release(&proc->alloc);
355b0502
GKH
3303 binder_stats_deleted(BINDER_STAT_PROC);
3304
355b0502
GKH
3305 put_task_struct(proc->tsk);
3306
3307 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
19c98724 3308 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 3309 __func__, proc->pid, threads, nodes, incoming_refs,
19c98724 3310 outgoing_refs, active_transactions);
355b0502
GKH
3311
3312 kfree(proc);
3313}
3314
3315static void binder_deferred_func(struct work_struct *work)
3316{
3317 struct binder_proc *proc;
3318 struct files_struct *files;
3319
3320 int defer;
10f62861 3321
355b0502 3322 do {
975a1ac9 3323 binder_lock(__func__);
355b0502
GKH
3324 mutex_lock(&binder_deferred_lock);
3325 if (!hlist_empty(&binder_deferred_list)) {
3326 proc = hlist_entry(binder_deferred_list.first,
3327 struct binder_proc, deferred_work_node);
3328 hlist_del_init(&proc->deferred_work_node);
3329 defer = proc->deferred_work;
3330 proc->deferred_work = 0;
3331 } else {
3332 proc = NULL;
3333 defer = 0;
3334 }
3335 mutex_unlock(&binder_deferred_lock);
3336
3337 files = NULL;
3338 if (defer & BINDER_DEFERRED_PUT_FILES) {
3339 files = proc->files;
3340 if (files)
3341 proc->files = NULL;
3342 }
3343
3344 if (defer & BINDER_DEFERRED_FLUSH)
3345 binder_deferred_flush(proc);
3346
3347 if (defer & BINDER_DEFERRED_RELEASE)
3348 binder_deferred_release(proc); /* frees proc */
3349
975a1ac9 3350 binder_unlock(__func__);
355b0502
GKH
3351 if (files)
3352 put_files_struct(files);
3353 } while (proc);
3354}
3355static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3356
3357static void
3358binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3359{
3360 mutex_lock(&binder_deferred_lock);
3361 proc->deferred_work |= defer;
3362 if (hlist_unhashed(&proc->deferred_work_node)) {
3363 hlist_add_head(&proc->deferred_work_node,
3364 &binder_deferred_list);
1beba52d 3365 schedule_work(&binder_deferred_work);
355b0502
GKH
3366 }
3367 mutex_unlock(&binder_deferred_lock);
3368}
3369
5249f488
AH
3370static void print_binder_transaction(struct seq_file *m, const char *prefix,
3371 struct binder_transaction *t)
3372{
3373 seq_printf(m,
3374 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3375 prefix, t->debug_id, t,
3376 t->from ? t->from->proc->pid : 0,
3377 t->from ? t->from->pid : 0,
3378 t->to_proc ? t->to_proc->pid : 0,
3379 t->to_thread ? t->to_thread->pid : 0,
3380 t->code, t->flags, t->priority, t->need_reply);
355b0502 3381 if (t->buffer == NULL) {
5249f488
AH
3382 seq_puts(m, " buffer free\n");
3383 return;
355b0502 3384 }
5249f488
AH
3385 if (t->buffer->target_node)
3386 seq_printf(m, " node %d",
3387 t->buffer->target_node->debug_id);
3388 seq_printf(m, " size %zd:%zd data %p\n",
3389 t->buffer->data_size, t->buffer->offsets_size,
3390 t->buffer->data);
355b0502
GKH
3391}
3392
5249f488
AH
3393static void print_binder_work(struct seq_file *m, const char *prefix,
3394 const char *transaction_prefix,
3395 struct binder_work *w)
355b0502
GKH
3396{
3397 struct binder_node *node;
3398 struct binder_transaction *t;
3399
3400 switch (w->type) {
3401 case BINDER_WORK_TRANSACTION:
3402 t = container_of(w, struct binder_transaction, work);
5249f488 3403 print_binder_transaction(m, transaction_prefix, t);
355b0502 3404 break;
26549d17
TK
3405 case BINDER_WORK_RETURN_ERROR: {
3406 struct binder_error *e = container_of(
3407 w, struct binder_error, work);
3408
3409 seq_printf(m, "%stransaction error: %u\n",
3410 prefix, e->cmd);
3411 } break;
355b0502 3412 case BINDER_WORK_TRANSACTION_COMPLETE:
5249f488 3413 seq_printf(m, "%stransaction complete\n", prefix);
355b0502
GKH
3414 break;
3415 case BINDER_WORK_NODE:
3416 node = container_of(w, struct binder_node, work);
da49889d
AH
3417 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3418 prefix, node->debug_id,
3419 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
3420 break;
3421 case BINDER_WORK_DEAD_BINDER:
5249f488 3422 seq_printf(m, "%shas dead binder\n", prefix);
355b0502
GKH
3423 break;
3424 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5249f488 3425 seq_printf(m, "%shas cleared dead binder\n", prefix);
355b0502
GKH
3426 break;
3427 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5249f488 3428 seq_printf(m, "%shas cleared death notification\n", prefix);
355b0502
GKH
3429 break;
3430 default:
5249f488 3431 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
355b0502
GKH
3432 break;
3433 }
355b0502
GKH
3434}
3435
5249f488
AH
3436static void print_binder_thread(struct seq_file *m,
3437 struct binder_thread *thread,
3438 int print_always)
355b0502
GKH
3439{
3440 struct binder_transaction *t;
3441 struct binder_work *w;
5249f488
AH
3442 size_t start_pos = m->count;
3443 size_t header_pos;
355b0502 3444
08dabcee
TK
3445 seq_printf(m, " thread %d: l %02x need_return %d\n",
3446 thread->pid, thread->looper,
3447 thread->looper_need_return);
5249f488 3448 header_pos = m->count;
355b0502
GKH
3449 t = thread->transaction_stack;
3450 while (t) {
355b0502 3451 if (t->from == thread) {
5249f488
AH
3452 print_binder_transaction(m,
3453 " outgoing transaction", t);
355b0502
GKH
3454 t = t->from_parent;
3455 } else if (t->to_thread == thread) {
5249f488
AH
3456 print_binder_transaction(m,
3457 " incoming transaction", t);
355b0502
GKH
3458 t = t->to_parent;
3459 } else {
5249f488 3460 print_binder_transaction(m, " bad transaction", t);
355b0502
GKH
3461 t = NULL;
3462 }
3463 }
3464 list_for_each_entry(w, &thread->todo, entry) {
5249f488 3465 print_binder_work(m, " ", " pending transaction", w);
355b0502 3466 }
5249f488
AH
3467 if (!print_always && m->count == header_pos)
3468 m->count = start_pos;
355b0502
GKH
3469}
3470
5249f488 3471static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
3472{
3473 struct binder_ref *ref;
355b0502
GKH
3474 struct binder_work *w;
3475 int count;
3476
3477 count = 0;
b67bfe0d 3478 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
3479 count++;
3480
da49889d
AH
3481 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3482 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
3483 node->has_strong_ref, node->has_weak_ref,
3484 node->local_strong_refs, node->local_weak_refs,
3485 node->internal_strong_refs, count);
355b0502 3486 if (count) {
5249f488 3487 seq_puts(m, " proc");
b67bfe0d 3488 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 3489 seq_printf(m, " %d", ref->proc->pid);
355b0502 3490 }
5249f488
AH
3491 seq_puts(m, "\n");
3492 list_for_each_entry(w, &node->async_todo, entry)
3493 print_binder_work(m, " ",
3494 " pending async transaction", w);
355b0502
GKH
3495}
3496
5249f488 3497static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 3498{
5249f488
AH
3499 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3500 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3501 ref->node->debug_id, ref->strong, ref->weak, ref->death);
355b0502
GKH
3502}
3503
5249f488
AH
3504static void print_binder_proc(struct seq_file *m,
3505 struct binder_proc *proc, int print_all)
355b0502
GKH
3506{
3507 struct binder_work *w;
3508 struct rb_node *n;
5249f488
AH
3509 size_t start_pos = m->count;
3510 size_t header_pos;
3511
3512 seq_printf(m, "proc %d\n", proc->pid);
14db3181 3513 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
3514 header_pos = m->count;
3515
3516 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3517 print_binder_thread(m, rb_entry(n, struct binder_thread,
3518 rb_node), print_all);
3519 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
3520 struct binder_node *node = rb_entry(n, struct binder_node,
3521 rb_node);
3522 if (print_all || node->has_async_transaction)
5249f488 3523 print_binder_node(m, node);
355b0502
GKH
3524 }
3525 if (print_all) {
3526 for (n = rb_first(&proc->refs_by_desc);
5249f488 3527 n != NULL;
355b0502 3528 n = rb_next(n))
5249f488
AH
3529 print_binder_ref(m, rb_entry(n, struct binder_ref,
3530 rb_node_desc));
355b0502 3531 }
19c98724 3532 binder_alloc_print_allocated(m, &proc->alloc);
5249f488
AH
3533 list_for_each_entry(w, &proc->todo, entry)
3534 print_binder_work(m, " ", " pending transaction", w);
355b0502 3535 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 3536 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
3537 break;
3538 }
5249f488
AH
3539 if (!print_all && m->count == header_pos)
3540 m->count = start_pos;
355b0502
GKH
3541}
3542
/* Names of BR_* return codes, indexed to match binder_stats.br[]. */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3563
/* Names of BC_* command codes, indexed to match binder_stats.bc[]. */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
3585
/* Names of object kinds, indexed to match BINDER_STAT_* counters. */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3595
5249f488
AH
3596static void print_binder_stats(struct seq_file *m, const char *prefix,
3597 struct binder_stats *stats)
355b0502
GKH
3598{
3599 int i;
3600
3601 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 3602 ARRAY_SIZE(binder_command_strings));
355b0502 3603 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
0953c797
BJS
3604 int temp = atomic_read(&stats->bc[i]);
3605
3606 if (temp)
5249f488 3607 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 3608 binder_command_strings[i], temp);
355b0502
GKH
3609 }
3610
3611 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 3612 ARRAY_SIZE(binder_return_strings));
355b0502 3613 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
0953c797
BJS
3614 int temp = atomic_read(&stats->br[i]);
3615
3616 if (temp)
5249f488 3617 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 3618 binder_return_strings[i], temp);
355b0502
GKH
3619 }
3620
3621 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 3622 ARRAY_SIZE(binder_objstat_strings));
355b0502 3623 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 3624 ARRAY_SIZE(stats->obj_deleted));
355b0502 3625 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
0953c797
BJS
3626 int created = atomic_read(&stats->obj_created[i]);
3627 int deleted = atomic_read(&stats->obj_deleted[i]);
3628
3629 if (created || deleted)
3630 seq_printf(m, "%s%s: active %d total %d\n",
3631 prefix,
5249f488 3632 binder_objstat_strings[i],
0953c797
BJS
3633 created - deleted,
3634 created);
355b0502 3635 }
355b0502
GKH
3636}
3637
5249f488
AH
3638static void print_binder_proc_stats(struct seq_file *m,
3639 struct binder_proc *proc)
355b0502
GKH
3640{
3641 struct binder_work *w;
3642 struct rb_node *n;
3643 int count, strong, weak;
3644
5249f488 3645 seq_printf(m, "proc %d\n", proc->pid);
14db3181 3646 seq_printf(m, "context %s\n", proc->context->name);
355b0502
GKH
3647 count = 0;
3648 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3649 count++;
5249f488
AH
3650 seq_printf(m, " threads: %d\n", count);
3651 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
3652 " ready threads %d\n"
3653 " free async space %zd\n", proc->requested_threads,
3654 proc->requested_threads_started, proc->max_threads,
19c98724
TK
3655 proc->ready_threads,
3656 binder_alloc_get_free_async_space(&proc->alloc));
355b0502
GKH
3657 count = 0;
3658 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3659 count++;
5249f488 3660 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
3661 count = 0;
3662 strong = 0;
3663 weak = 0;
3664 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3665 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3666 rb_node_desc);
3667 count++;
3668 strong += ref->strong;
3669 weak += ref->weak;
3670 }
5249f488 3671 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 3672
19c98724 3673 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 3674 seq_printf(m, " buffers: %d\n", count);
355b0502
GKH
3675
3676 count = 0;
3677 list_for_each_entry(w, &proc->todo, entry) {
3678 switch (w->type) {
3679 case BINDER_WORK_TRANSACTION:
3680 count++;
3681 break;
3682 default:
3683 break;
3684 }
3685 }
5249f488 3686 seq_printf(m, " pending transactions: %d\n", count);
355b0502 3687
5249f488 3688 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
3689}
3690
3691
5249f488 3692static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
3693{
3694 struct binder_proc *proc;
355b0502 3695 struct binder_node *node;
355b0502 3696
1cf29cf4 3697 binder_lock(__func__);
355b0502 3698
5249f488 3699 seq_puts(m, "binder state:\n");
355b0502 3700
c44b1231 3701 spin_lock(&binder_dead_nodes_lock);
355b0502 3702 if (!hlist_empty(&binder_dead_nodes))
5249f488 3703 seq_puts(m, "dead nodes:\n");
b67bfe0d 3704 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
5249f488 3705 print_binder_node(m, node);
c44b1231 3706 spin_unlock(&binder_dead_nodes_lock);
355b0502 3707
c44b1231 3708 mutex_lock(&binder_procs_lock);
b67bfe0d 3709 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 3710 print_binder_proc(m, proc, 1);
c44b1231 3711 mutex_unlock(&binder_procs_lock);
1cf29cf4 3712 binder_unlock(__func__);
5249f488 3713 return 0;
355b0502
GKH
3714}
3715
5249f488 3716static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
3717{
3718 struct binder_proc *proc;
355b0502 3719
1cf29cf4 3720 binder_lock(__func__);
355b0502 3721
5249f488 3722 seq_puts(m, "binder stats:\n");
355b0502 3723
5249f488 3724 print_binder_stats(m, "", &binder_stats);
355b0502 3725
c44b1231 3726 mutex_lock(&binder_procs_lock);
b67bfe0d 3727 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 3728 print_binder_proc_stats(m, proc);
c44b1231 3729 mutex_unlock(&binder_procs_lock);
1cf29cf4 3730 binder_unlock(__func__);
5249f488 3731 return 0;
355b0502
GKH
3732}
3733
5249f488 3734static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
3735{
3736 struct binder_proc *proc;
355b0502 3737
1cf29cf4 3738 binder_lock(__func__);
355b0502 3739
5249f488 3740 seq_puts(m, "binder transactions:\n");
c44b1231 3741 mutex_lock(&binder_procs_lock);
b67bfe0d 3742 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 3743 print_binder_proc(m, proc, 0);
c44b1231 3744 mutex_unlock(&binder_procs_lock);
1cf29cf4 3745 binder_unlock(__func__);
5249f488 3746 return 0;
355b0502
GKH
3747}
3748
5249f488 3749static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 3750{
83050a4e 3751 struct binder_proc *itr;
14db3181 3752 int pid = (unsigned long)m->private;
355b0502 3753
1cf29cf4 3754 binder_lock(__func__);
83050a4e 3755
c44b1231 3756 mutex_lock(&binder_procs_lock);
83050a4e 3757 hlist_for_each_entry(itr, &binder_procs, proc_node) {
14db3181
MC
3758 if (itr->pid == pid) {
3759 seq_puts(m, "binder proc state:\n");
3760 print_binder_proc(m, itr, 1);
83050a4e
RA
3761 }
3762 }
c44b1231
TK
3763 mutex_unlock(&binder_procs_lock);
3764
1cf29cf4 3765 binder_unlock(__func__);
5249f488 3766 return 0;
355b0502
GKH
3767}
3768
5249f488 3769static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
3770 struct binder_transaction_log_entry *e)
3771{
d99c7333
TK
3772 int debug_id = READ_ONCE(e->debug_id_done);
3773 /*
3774 * read barrier to guarantee debug_id_done read before
3775 * we print the log values
3776 */
3777 smp_rmb();
5249f488 3778 seq_printf(m,
d99c7333 3779 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
3780 e->debug_id, (e->call_type == 2) ? "reply" :
3781 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
14db3181 3782 e->from_thread, e->to_proc, e->to_thread, e->context_name,
57ada2fb
TK
3783 e->to_node, e->target_handle, e->data_size, e->offsets_size,
3784 e->return_error, e->return_error_param,
3785 e->return_error_line);
d99c7333
TK
3786 /*
3787 * read-barrier to guarantee read of debug_id_done after
3788 * done printing the fields of the entry
3789 */
3790 smp_rmb();
3791 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
3792 "\n" : " (incomplete)\n");
355b0502
GKH
3793}
3794
5249f488 3795static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 3796{
5249f488 3797 struct binder_transaction_log *log = m->private;
d99c7333
TK
3798 unsigned int log_cur = atomic_read(&log->cur);
3799 unsigned int count;
3800 unsigned int cur;
355b0502 3801 int i;
355b0502 3802
d99c7333
TK
3803 count = log_cur + 1;
3804 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
3805 0 : count % ARRAY_SIZE(log->entry);
3806 if (count > ARRAY_SIZE(log->entry) || log->full)
3807 count = ARRAY_SIZE(log->entry);
3808 for (i = 0; i < count; i++) {
3809 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
3810
3811 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 3812 }
5249f488 3813 return 0;
355b0502
GKH
3814}
3815
3816static const struct file_operations binder_fops = {
3817 .owner = THIS_MODULE,
3818 .poll = binder_poll,
3819 .unlocked_ioctl = binder_ioctl,
da49889d 3820 .compat_ioctl = binder_ioctl,
355b0502
GKH
3821 .mmap = binder_mmap,
3822 .open = binder_open,
3823 .flush = binder_flush,
3824 .release = binder_release,
3825};
3826
/*
 * Generate the debugfs file_operations ("binder_*_fops") wrapping each
 * of the seq_file show functions above.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
ac4812c5
MC
3832static int __init init_binder_device(const char *name)
3833{
3834 int ret;
3835 struct binder_device *binder_device;
3836
3837 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3838 if (!binder_device)
3839 return -ENOMEM;
3840
3841 binder_device->miscdev.fops = &binder_fops;
3842 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3843 binder_device->miscdev.name = name;
3844
3845 binder_device->context.binder_context_mgr_uid = INVALID_UID;
3846 binder_device->context.name = name;
c44b1231 3847 mutex_init(&binder_device->context.context_mgr_node_lock);
ac4812c5
MC
3848
3849 ret = misc_register(&binder_device->miscdev);
3850 if (ret < 0) {
3851 kfree(binder_device);
3852 return ret;
3853 }
3854
3855 hlist_add_head(&binder_device->hlist, &binder_devices);
3856
3857 return ret;
3858}
3859
355b0502
GKH
3860static int __init binder_init(void)
3861{
3862 int ret;
ac4812c5
MC
3863 char *device_name, *device_names;
3864 struct binder_device *device;
3865 struct hlist_node *tmp;
355b0502 3866
d99c7333
TK
3867 atomic_set(&binder_transaction_log.cur, ~0U);
3868 atomic_set(&binder_transaction_log_failed.cur, ~0U);
3869
16b66554
AH
3870 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3871 if (binder_debugfs_dir_entry_root)
3872 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3873 binder_debugfs_dir_entry_root);
ac4812c5 3874
16b66554
AH
3875 if (binder_debugfs_dir_entry_root) {
3876 debugfs_create_file("state",
3877 S_IRUGO,
3878 binder_debugfs_dir_entry_root,
3879 NULL,
3880 &binder_state_fops);
3881 debugfs_create_file("stats",
3882 S_IRUGO,
3883 binder_debugfs_dir_entry_root,
3884 NULL,
3885 &binder_stats_fops);
3886 debugfs_create_file("transactions",
3887 S_IRUGO,
3888 binder_debugfs_dir_entry_root,
3889 NULL,
3890 &binder_transactions_fops);
3891 debugfs_create_file("transaction_log",
3892 S_IRUGO,
3893 binder_debugfs_dir_entry_root,
3894 &binder_transaction_log,
3895 &binder_transaction_log_fops);
3896 debugfs_create_file("failed_transaction_log",
3897 S_IRUGO,
3898 binder_debugfs_dir_entry_root,
3899 &binder_transaction_log_failed,
3900 &binder_transaction_log_fops);
355b0502 3901 }
ac4812c5
MC
3902
3903 /*
3904 * Copy the module_parameter string, because we don't want to
3905 * tokenize it in-place.
3906 */
3907 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
3908 if (!device_names) {
3909 ret = -ENOMEM;
3910 goto err_alloc_device_names_failed;
3911 }
3912 strcpy(device_names, binder_devices_param);
3913
3914 while ((device_name = strsep(&device_names, ","))) {
3915 ret = init_binder_device(device_name);
3916 if (ret)
3917 goto err_init_binder_device_failed;
3918 }
3919
3920 return ret;
3921
3922err_init_binder_device_failed:
3923 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3924 misc_deregister(&device->miscdev);
3925 hlist_del(&device->hlist);
3926 kfree(device);
3927 }
3928err_alloc_device_names_failed:
3929 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3930
355b0502
GKH
3931 return ret;
3932}
3933
3934device_initcall(binder_init);
3935
975a1ac9
AH
3936#define CREATE_TRACE_POINTS
3937#include "binder_trace.h"
3938
355b0502 3939MODULE_LICENSE("GPL v2");