/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
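/*
 * BINDER_DEBUG_ENTRY(proc) expands into binder_proc_open() plus a
 * binder_proc_fops wired up to single_open()/seq_read(), so each debugfs
 * entry only needs its binder_<name>_show() routine to be written.
 */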
/* This is only defined in include/asm-arm/sizes.h */
#define SZ_4M 0x400000

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
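/*
 * binder_debug_mask is a bitwise OR of the flags above and can be changed
 * at runtime through the debug_mask module parameter, e.g. (assuming the
 * driver is built as/into "binder"):
 *
 *   echo 0x10 > /sys/module/binder/parameters/debug_mask
 *
 * would enable only the BINDER_DEBUG_DEAD_BINDER output.
 */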
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)
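/*
 * Every object placed in a transaction buffer begins with a
 * struct binder_object_header, so these helpers use container_of() on the
 * header pointer to get back to the enclosing flat_binder_object,
 * binder_fd_object, binder_buffer_object or binder_fd_array_object.
 */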
enum binder_stat_types {
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        int br[_IOC_NR(BR_FAILED_REPLY) + 1];
        int bc[_IOC_NR(BC_REPLY_SG) + 1];
        int obj_created[BINDER_STAT_COUNT];
        int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        binder_stats.obj_created[type]++;
}
struct binder_transaction_log_entry {
        const char *context_name;
};

struct binder_transaction_log {
        int next;
        struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
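/*
 * Both logs are small fixed-size rings: binder_transaction_log_add() hands
 * out the slot at ->next and wraps the index back to the start of entry[]
 * once it runs off the end, so only the most recent transactions are kept.
 */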
static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;

        e = &log->entry[log->next];
        memset(e, 0, sizeof(*e));
        log->next++;
        if (log->next == ARRAY_SIZE(log->entry)) {
                log->next = 0;
        }
        return e;
}
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};
struct binder_work {
        struct list_head entry;
        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};
struct binder_node {
        struct binder_work work;
        struct rb_node rb_node;
        struct hlist_node dead_node;
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_strong_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        unsigned has_strong_ref:1;
        unsigned pending_strong_ref:1;
        unsigned has_weak_ref:1;
        unsigned pending_weak_ref:1;
        unsigned has_async_transaction:1;
        unsigned accept_fds:1;
        unsigned min_priority:8;
        struct list_head async_todo;
};
struct binder_ref_death {
        struct binder_work work;
        binder_uintptr_t cookie;
};

struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        uint32_t desc;
        int strong;
        int weak;
        struct binder_ref_death *death;
};
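/*
 * A binder_buffer describes one allocation carved out of a process's
 * mmap'ed binder area. Buffers are kept on proc->buffers in address order
 * and, depending on state, in either the free_buffers (keyed by size) or
 * allocated_buffers (keyed by address) rbtree; the payload follows the
 * struct in memory.
 */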
struct binder_buffer {
        struct list_head entry; /* free and allocated entries by address */
        struct rb_node rb_node; /* free entry by size or allocated entry */
        unsigned free:1;
        unsigned allow_user_free:1;
        unsigned async_transaction:1;
        unsigned debug_id:29;

        struct binder_transaction *transaction;

        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
        size_t extra_buffers_size;
        uint8_t data[0];
};
enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES    = 0x01,
        BINDER_DEFERRED_FLUSH        = 0x02,
        BINDER_DEFERRED_RELEASE      = 0x04,
};
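/*
 * One binder_proc exists per process that has opened a binder device. It
 * owns the rbtrees of threads, local nodes and remote references, the
 * mmap'ed transaction buffer area, and the todo list that looper threads
 * drain. user_buffer_offset is the constant delta between the kernel and
 * userspace mappings of that buffer area and is used to translate buffer
 * addresses in both directions.
 */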
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        int pid;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        void *buffer;
        ptrdiff_t user_buffer_offset;

        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
        size_t free_async_space;

        struct page **pages;
        size_t buffer_size;
        uint32_t buffer_free;
        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int requested_threads;
        int requested_threads_started;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_context *context;
};
enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        int pid;
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        uint32_t return_error; /* Write failed, return error code in read buf */
        uint32_t return_error2; /* Write failed, return error code in read */
                /* buffer. Used when sending a reply to a dead process that */
                /* we are also waiting on */
        wait_queue_head_t wait;
        struct binder_stats stats;
};
struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
};
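/*
 * from_parent chains a transaction onto the sending thread's
 * transaction_stack, while to_parent chains it onto the receiving thread's
 * stack once it has been delivered; popping a reply walks these links back
 * so nested synchronous calls unwind in order.
 */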
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;

        if (!lock_task_sighand(proc->tsk, &irqs))
                return -EMFILE;

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        __fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        if (proc->files == NULL)
                return -ESRCH;

        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}
static inline void binder_lock(const char *tag)
{
        trace_binder_lock(tag);
        mutex_lock(&binder_main_lock);
        trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
        trace_binder_unlock(tag);
        mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
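/*
 * The helpers below implement the per-process transaction buffer
 * allocator: free chunks live in a best-fit rbtree ordered by size,
 * allocated chunks in an rbtree ordered by address, and physical pages
 * are only mapped in (and unmapped) for the ranges actually in use.
 */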
static size_t binder_buffer_size(struct binder_proc *proc,
                                 struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &proc->buffers))
                return proc->buffer + proc->buffer_size - (void *)buffer->data;
        return (size_t)list_entry(buffer->entry.next,
                          struct binder_buffer, entry) - (size_t)buffer->data;
}
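/*
 * A buffer's usable size is simply the gap between the start of its data[]
 * area and the start of the next binder_buffer header (or the end of the
 * mmap'ed region for the last buffer). For example, two back-to-back
 * buffers whose headers sit 64 bytes apart leave the first one roughly
 * 64 bytes minus the header size of payload.
 */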
493 static void binder_insert_free_buffer(struct binder_proc
*proc
,
494 struct binder_buffer
*new_buffer
)
496 struct rb_node
**p
= &proc
->free_buffers
.rb_node
;
497 struct rb_node
*parent
= NULL
;
498 struct binder_buffer
*buffer
;
500 size_t new_buffer_size
;
502 BUG_ON(!new_buffer
->free
);
504 new_buffer_size
= binder_buffer_size(proc
, new_buffer
);
506 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
507 "%d: add free buffer, size %zd, at %p\n",
508 proc
->pid
, new_buffer_size
, new_buffer
);
512 buffer
= rb_entry(parent
, struct binder_buffer
, rb_node
);
513 BUG_ON(!buffer
->free
);
515 buffer_size
= binder_buffer_size(proc
, buffer
);
517 if (new_buffer_size
< buffer_size
)
518 p
= &parent
->rb_left
;
520 p
= &parent
->rb_right
;
522 rb_link_node(&new_buffer
->rb_node
, parent
, p
);
523 rb_insert_color(&new_buffer
->rb_node
, &proc
->free_buffers
);
526 static void binder_insert_allocated_buffer(struct binder_proc
*proc
,
527 struct binder_buffer
*new_buffer
)
529 struct rb_node
**p
= &proc
->allocated_buffers
.rb_node
;
530 struct rb_node
*parent
= NULL
;
531 struct binder_buffer
*buffer
;
533 BUG_ON(new_buffer
->free
);
537 buffer
= rb_entry(parent
, struct binder_buffer
, rb_node
);
538 BUG_ON(buffer
->free
);
540 if (new_buffer
< buffer
)
541 p
= &parent
->rb_left
;
542 else if (new_buffer
> buffer
)
543 p
= &parent
->rb_right
;
547 rb_link_node(&new_buffer
->rb_node
, parent
, p
);
548 rb_insert_color(&new_buffer
->rb_node
, &proc
->allocated_buffers
);
551 static struct binder_buffer
*binder_buffer_lookup(struct binder_proc
*proc
,
554 struct rb_node
*n
= proc
->allocated_buffers
.rb_node
;
555 struct binder_buffer
*buffer
;
556 struct binder_buffer
*kern_ptr
;
558 kern_ptr
= (struct binder_buffer
*)(user_ptr
- proc
->user_buffer_offset
559 - offsetof(struct binder_buffer
, data
));
562 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
563 BUG_ON(buffer
->free
);
565 if (kern_ptr
< buffer
)
567 else if (kern_ptr
> buffer
)
575 static int binder_update_page_range(struct binder_proc
*proc
, int allocate
,
576 void *start
, void *end
,
577 struct vm_area_struct
*vma
)
580 unsigned long user_page_addr
;
582 struct mm_struct
*mm
;
584 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
585 "%d: %s pages %p-%p\n", proc
->pid
,
586 allocate
? "allocate" : "free", start
, end
);
591 trace_binder_update_page_range(proc
, allocate
, start
, end
);
596 mm
= get_task_mm(proc
->tsk
);
599 down_write(&mm
->mmap_sem
);
601 if (vma
&& mm
!= proc
->vma_vm_mm
) {
602 pr_err("%d: vma mm and task mm mismatch\n",
612 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
617 for (page_addr
= start
; page_addr
< end
; page_addr
+= PAGE_SIZE
) {
620 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
623 *page
= alloc_page(GFP_KERNEL
| __GFP_HIGHMEM
| __GFP_ZERO
);
625 pr_err("%d: binder_alloc_buf failed for page at %p\n",
626 proc
->pid
, page_addr
);
627 goto err_alloc_page_failed
;
629 ret
= map_kernel_range_noflush((unsigned long)page_addr
,
630 PAGE_SIZE
, PAGE_KERNEL
, page
);
631 flush_cache_vmap((unsigned long)page_addr
,
632 (unsigned long)page_addr
+ PAGE_SIZE
);
634 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
635 proc
->pid
, page_addr
);
636 goto err_map_kernel_failed
;
639 (uintptr_t)page_addr
+ proc
->user_buffer_offset
;
640 ret
= vm_insert_page(vma
, user_page_addr
, page
[0]);
642 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
643 proc
->pid
, user_page_addr
);
644 goto err_vm_insert_page_failed
;
646 /* vm_insert_page does not seem to increment the refcount */
649 up_write(&mm
->mmap_sem
);
655 for (page_addr
= end
- PAGE_SIZE
; page_addr
>= start
;
656 page_addr
-= PAGE_SIZE
) {
657 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
659 zap_page_range(vma
, (uintptr_t)page_addr
+
660 proc
->user_buffer_offset
, PAGE_SIZE
);
661 err_vm_insert_page_failed
:
662 unmap_kernel_range((unsigned long)page_addr
, PAGE_SIZE
);
663 err_map_kernel_failed
:
666 err_alloc_page_failed
:
671 up_write(&mm
->mmap_sem
);
677 static struct binder_buffer
*binder_alloc_buf(struct binder_proc
*proc
,
680 size_t extra_buffers_size
,
683 struct rb_node
*n
= proc
->free_buffers
.rb_node
;
684 struct binder_buffer
*buffer
;
686 struct rb_node
*best_fit
= NULL
;
689 size_t size
, data_offsets_size
;
691 if (proc
->vma
== NULL
) {
692 pr_err("%d: binder_alloc_buf, no vma\n",
697 data_offsets_size
= ALIGN(data_size
, sizeof(void *)) +
698 ALIGN(offsets_size
, sizeof(void *));
700 if (data_offsets_size
< data_size
|| data_offsets_size
< offsets_size
) {
701 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
702 proc
->pid
, data_size
, offsets_size
);
705 size
= data_offsets_size
+ ALIGN(extra_buffers_size
, sizeof(void *));
706 if (size
< data_offsets_size
|| size
< extra_buffers_size
) {
707 binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
708 proc
->pid
, extra_buffers_size
);
712 proc
->free_async_space
< size
+ sizeof(struct binder_buffer
)) {
713 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
714 "%d: binder_alloc_buf size %zd failed, no async space left\n",
720 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
721 BUG_ON(!buffer
->free
);
722 buffer_size
= binder_buffer_size(proc
, buffer
);
724 if (size
< buffer_size
) {
727 } else if (size
> buffer_size
)
734 if (best_fit
== NULL
) {
735 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
740 buffer
= rb_entry(best_fit
, struct binder_buffer
, rb_node
);
741 buffer_size
= binder_buffer_size(proc
, buffer
);
744 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
745 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
746 proc
->pid
, size
, buffer
, buffer_size
);
749 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
);
751 if (size
+ sizeof(struct binder_buffer
) + 4 >= buffer_size
)
752 buffer_size
= size
; /* no room for other buffers */
754 buffer_size
= size
+ sizeof(struct binder_buffer
);
757 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
+ buffer_size
);
758 if (end_page_addr
> has_page_addr
)
759 end_page_addr
= has_page_addr
;
760 if (binder_update_page_range(proc
, 1,
761 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
), end_page_addr
, NULL
))
764 rb_erase(best_fit
, &proc
->free_buffers
);
766 binder_insert_allocated_buffer(proc
, buffer
);
767 if (buffer_size
!= size
) {
768 struct binder_buffer
*new_buffer
= (void *)buffer
->data
+ size
;
770 list_add(&new_buffer
->entry
, &buffer
->entry
);
771 new_buffer
->free
= 1;
772 binder_insert_free_buffer(proc
, new_buffer
);
774 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
775 "%d: binder_alloc_buf size %zd got %p\n",
776 proc
->pid
, size
, buffer
);
777 buffer
->data_size
= data_size
;
778 buffer
->offsets_size
= offsets_size
;
779 buffer
->extra_buffers_size
= extra_buffers_size
;
780 buffer
->async_transaction
= is_async
;
782 proc
->free_async_space
-= size
+ sizeof(struct binder_buffer
);
783 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
784 "%d: binder_alloc_buf size %zd async free %zd\n",
785 proc
->pid
, size
, proc
->free_async_space
);
791 static void *buffer_start_page(struct binder_buffer
*buffer
)
793 return (void *)((uintptr_t)buffer
& PAGE_MASK
);
796 static void *buffer_end_page(struct binder_buffer
*buffer
)
798 return (void *)(((uintptr_t)(buffer
+ 1) - 1) & PAGE_MASK
);
801 static void binder_delete_free_buffer(struct binder_proc
*proc
,
802 struct binder_buffer
*buffer
)
804 struct binder_buffer
*prev
, *next
= NULL
;
805 int free_page_end
= 1;
806 int free_page_start
= 1;
808 BUG_ON(proc
->buffers
.next
== &buffer
->entry
);
809 prev
= list_entry(buffer
->entry
.prev
, struct binder_buffer
, entry
);
811 if (buffer_end_page(prev
) == buffer_start_page(buffer
)) {
813 if (buffer_end_page(prev
) == buffer_end_page(buffer
))
815 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
816 "%d: merge free, buffer %p share page with %p\n",
817 proc
->pid
, buffer
, prev
);
820 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
821 next
= list_entry(buffer
->entry
.next
,
822 struct binder_buffer
, entry
);
823 if (buffer_start_page(next
) == buffer_end_page(buffer
)) {
825 if (buffer_start_page(next
) ==
826 buffer_start_page(buffer
))
828 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
829 "%d: merge free, buffer %p share page with %p\n",
830 proc
->pid
, buffer
, prev
);
833 list_del(&buffer
->entry
);
834 if (free_page_start
|| free_page_end
) {
835 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
836 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
837 proc
->pid
, buffer
, free_page_start
? "" : " end",
838 free_page_end
? "" : " start", prev
, next
);
839 binder_update_page_range(proc
, 0, free_page_start
?
840 buffer_start_page(buffer
) : buffer_end_page(buffer
),
841 (free_page_end
? buffer_end_page(buffer
) :
842 buffer_start_page(buffer
)) + PAGE_SIZE
, NULL
);
846 static void binder_free_buf(struct binder_proc
*proc
,
847 struct binder_buffer
*buffer
)
849 size_t size
, buffer_size
;
851 buffer_size
= binder_buffer_size(proc
, buffer
);
853 size
= ALIGN(buffer
->data_size
, sizeof(void *)) +
854 ALIGN(buffer
->offsets_size
, sizeof(void *)) +
855 ALIGN(buffer
->extra_buffers_size
, sizeof(void *));
857 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
858 "%d: binder_free_buf %p size %zd buffer_size %zd\n",
859 proc
->pid
, buffer
, size
, buffer_size
);
861 BUG_ON(buffer
->free
);
862 BUG_ON(size
> buffer_size
);
863 BUG_ON(buffer
->transaction
!= NULL
);
864 BUG_ON((void *)buffer
< proc
->buffer
);
865 BUG_ON((void *)buffer
> proc
->buffer
+ proc
->buffer_size
);
867 if (buffer
->async_transaction
) {
868 proc
->free_async_space
+= size
+ sizeof(struct binder_buffer
);
870 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
871 "%d: binder_free_buf size %zd async free %zd\n",
872 proc
->pid
, size
, proc
->free_async_space
);
875 binder_update_page_range(proc
, 0,
876 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
),
877 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
),
879 rb_erase(&buffer
->rb_node
, &proc
->allocated_buffers
);
881 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
882 struct binder_buffer
*next
= list_entry(buffer
->entry
.next
,
883 struct binder_buffer
, entry
);
886 rb_erase(&next
->rb_node
, &proc
->free_buffers
);
887 binder_delete_free_buffer(proc
, next
);
890 if (proc
->buffers
.next
!= &buffer
->entry
) {
891 struct binder_buffer
*prev
= list_entry(buffer
->entry
.prev
,
892 struct binder_buffer
, entry
);
895 binder_delete_free_buffer(proc
, buffer
);
896 rb_erase(&prev
->rb_node
, &proc
->free_buffers
);
900 binder_insert_free_buffer(proc
, buffer
);
903 static struct binder_node
*binder_get_node(struct binder_proc
*proc
,
904 binder_uintptr_t ptr
)
906 struct rb_node
*n
= proc
->nodes
.rb_node
;
907 struct binder_node
*node
;
910 node
= rb_entry(n
, struct binder_node
, rb_node
);
914 else if (ptr
> node
->ptr
)
922 static struct binder_node
*binder_new_node(struct binder_proc
*proc
,
923 binder_uintptr_t ptr
,
924 binder_uintptr_t cookie
)
926 struct rb_node
**p
= &proc
->nodes
.rb_node
;
927 struct rb_node
*parent
= NULL
;
928 struct binder_node
*node
;
932 node
= rb_entry(parent
, struct binder_node
, rb_node
);
936 else if (ptr
> node
->ptr
)
942 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
945 binder_stats_created(BINDER_STAT_NODE
);
946 rb_link_node(&node
->rb_node
, parent
, p
);
947 rb_insert_color(&node
->rb_node
, &proc
->nodes
);
948 node
->debug_id
= ++binder_last_id
;
951 node
->cookie
= cookie
;
952 node
->work
.type
= BINDER_WORK_NODE
;
953 INIT_LIST_HEAD(&node
->work
.entry
);
954 INIT_LIST_HEAD(&node
->async_todo
);
955 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
956 "%d:%d node %d u%016llx c%016llx created\n",
957 proc
->pid
, current
->pid
, node
->debug_id
,
958 (u64
)node
->ptr
, (u64
)node
->cookie
);
962 static int binder_inc_node(struct binder_node
*node
, int strong
, int internal
,
963 struct list_head
*target_list
)
967 if (target_list
== NULL
&&
968 node
->internal_strong_refs
== 0 &&
970 node
== node
->proc
->context
->binder_context_mgr_node
&&
971 node
->has_strong_ref
)) {
972 pr_err("invalid inc strong node for %d\n",
976 node
->internal_strong_refs
++;
978 node
->local_strong_refs
++;
979 if (!node
->has_strong_ref
&& target_list
) {
980 list_del_init(&node
->work
.entry
);
981 list_add_tail(&node
->work
.entry
, target_list
);
985 node
->local_weak_refs
++;
986 if (!node
->has_weak_ref
&& list_empty(&node
->work
.entry
)) {
987 if (target_list
== NULL
) {
988 pr_err("invalid inc weak node for %d\n",
992 list_add_tail(&node
->work
.entry
, target_list
);
998 static int binder_dec_node(struct binder_node
*node
, int strong
, int internal
)
1002 node
->internal_strong_refs
--;
1004 node
->local_strong_refs
--;
1005 if (node
->local_strong_refs
|| node
->internal_strong_refs
)
1009 node
->local_weak_refs
--;
1010 if (node
->local_weak_refs
|| !hlist_empty(&node
->refs
))
1013 if (node
->proc
&& (node
->has_strong_ref
|| node
->has_weak_ref
)) {
1014 if (list_empty(&node
->work
.entry
)) {
1015 list_add_tail(&node
->work
.entry
, &node
->proc
->todo
);
1016 wake_up_interruptible(&node
->proc
->wait
);
1019 if (hlist_empty(&node
->refs
) && !node
->local_strong_refs
&&
1020 !node
->local_weak_refs
) {
1021 list_del_init(&node
->work
.entry
);
1023 rb_erase(&node
->rb_node
, &node
->proc
->nodes
);
1024 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1025 "refless node %d deleted\n",
1028 hlist_del(&node
->dead_node
);
1029 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1030 "dead node %d deleted\n",
1034 binder_stats_deleted(BINDER_STAT_NODE
);
1042 static struct binder_ref
*binder_get_ref(struct binder_proc
*proc
,
1043 u32 desc
, bool need_strong_ref
)
1045 struct rb_node
*n
= proc
->refs_by_desc
.rb_node
;
1046 struct binder_ref
*ref
;
1049 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1051 if (desc
< ref
->desc
) {
1053 } else if (desc
> ref
->desc
) {
1055 } else if (need_strong_ref
&& !ref
->strong
) {
1056 binder_user_error("tried to use weak ref as strong ref\n");
1065 static struct binder_ref
*binder_get_ref_for_node(struct binder_proc
*proc
,
1066 struct binder_node
*node
)
1069 struct rb_node
**p
= &proc
->refs_by_node
.rb_node
;
1070 struct rb_node
*parent
= NULL
;
1071 struct binder_ref
*ref
, *new_ref
;
1072 struct binder_context
*context
= proc
->context
;
1076 ref
= rb_entry(parent
, struct binder_ref
, rb_node_node
);
1078 if (node
< ref
->node
)
1080 else if (node
> ref
->node
)
1081 p
= &(*p
)->rb_right
;
1085 new_ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
1086 if (new_ref
== NULL
)
1088 binder_stats_created(BINDER_STAT_REF
);
1089 new_ref
->debug_id
= ++binder_last_id
;
1090 new_ref
->proc
= proc
;
1091 new_ref
->node
= node
;
1092 rb_link_node(&new_ref
->rb_node_node
, parent
, p
);
1093 rb_insert_color(&new_ref
->rb_node_node
, &proc
->refs_by_node
);
1095 new_ref
->desc
= (node
== context
->binder_context_mgr_node
) ? 0 : 1;
1096 for (n
= rb_first(&proc
->refs_by_desc
); n
!= NULL
; n
= rb_next(n
)) {
1097 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1098 if (ref
->desc
> new_ref
->desc
)
1100 new_ref
->desc
= ref
->desc
+ 1;
1103 p
= &proc
->refs_by_desc
.rb_node
;
1106 ref
= rb_entry(parent
, struct binder_ref
, rb_node_desc
);
1108 if (new_ref
->desc
< ref
->desc
)
1110 else if (new_ref
->desc
> ref
->desc
)
1111 p
= &(*p
)->rb_right
;
1115 rb_link_node(&new_ref
->rb_node_desc
, parent
, p
);
1116 rb_insert_color(&new_ref
->rb_node_desc
, &proc
->refs_by_desc
);
1118 hlist_add_head(&new_ref
->node_entry
, &node
->refs
);
1120 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1121 "%d new ref %d desc %d for node %d\n",
1122 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
,
1125 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1126 "%d new ref %d desc %d for dead node\n",
1127 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
);
1132 static void binder_delete_ref(struct binder_ref
*ref
)
1134 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1135 "%d delete ref %d desc %d for node %d\n",
1136 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
,
1137 ref
->node
->debug_id
);
1139 rb_erase(&ref
->rb_node_desc
, &ref
->proc
->refs_by_desc
);
1140 rb_erase(&ref
->rb_node_node
, &ref
->proc
->refs_by_node
);
1142 binder_dec_node(ref
->node
, 1, 1);
1143 hlist_del(&ref
->node_entry
);
1144 binder_dec_node(ref
->node
, 0, 1);
1146 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1147 "%d delete ref %d desc %d has death notification\n",
1148 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
);
1149 list_del(&ref
->death
->work
.entry
);
1151 binder_stats_deleted(BINDER_STAT_DEATH
);
1154 binder_stats_deleted(BINDER_STAT_REF
);
1157 static int binder_inc_ref(struct binder_ref
*ref
, int strong
,
1158 struct list_head
*target_list
)
1163 if (ref
->strong
== 0) {
1164 ret
= binder_inc_node(ref
->node
, 1, 1, target_list
);
1170 if (ref
->weak
== 0) {
1171 ret
= binder_inc_node(ref
->node
, 0, 1, target_list
);
1181 static int binder_dec_ref(struct binder_ref
*ref
, int strong
)
1184 if (ref
->strong
== 0) {
1185 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1186 ref
->proc
->pid
, ref
->debug_id
,
1187 ref
->desc
, ref
->strong
, ref
->weak
);
1191 if (ref
->strong
== 0) {
1194 ret
= binder_dec_node(ref
->node
, strong
, 1);
1199 if (ref
->weak
== 0) {
1200 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1201 ref
->proc
->pid
, ref
->debug_id
,
1202 ref
->desc
, ref
->strong
, ref
->weak
);
1207 if (ref
->strong
== 0 && ref
->weak
== 0)
1208 binder_delete_ref(ref
);
1212 static void binder_pop_transaction(struct binder_thread
*target_thread
,
1213 struct binder_transaction
*t
)
1215 if (target_thread
) {
1216 BUG_ON(target_thread
->transaction_stack
!= t
);
1217 BUG_ON(target_thread
->transaction_stack
->from
!= target_thread
);
1218 target_thread
->transaction_stack
=
1219 target_thread
->transaction_stack
->from_parent
;
1224 t
->buffer
->transaction
= NULL
;
1226 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
1229 static void binder_send_failed_reply(struct binder_transaction
*t
,
1230 uint32_t error_code
)
1232 struct binder_thread
*target_thread
;
1233 struct binder_transaction
*next
;
1235 BUG_ON(t
->flags
& TF_ONE_WAY
);
1237 target_thread
= t
->from
;
1238 if (target_thread
) {
1239 if (target_thread
->return_error
!= BR_OK
&&
1240 target_thread
->return_error2
== BR_OK
) {
1241 target_thread
->return_error2
=
1242 target_thread
->return_error
;
1243 target_thread
->return_error
= BR_OK
;
1245 if (target_thread
->return_error
== BR_OK
) {
1246 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1247 "send failed reply for transaction %d to %d:%d\n",
1249 target_thread
->proc
->pid
,
1250 target_thread
->pid
);
1252 binder_pop_transaction(target_thread
, t
);
1253 target_thread
->return_error
= error_code
;
1254 wake_up_interruptible(&target_thread
->wait
);
1256 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1257 target_thread
->proc
->pid
,
1259 target_thread
->return_error
);
1263 next
= t
->from_parent
;
1265 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1266 "send failed reply for transaction %d, target dead\n",
1269 binder_pop_transaction(target_thread
, t
);
1271 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1272 "reply failed, no target thread at root\n");
1276 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1277 "reply failed, no target thread -- retry %d\n",
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
1290 static size_t binder_validate_object(struct binder_buffer
*buffer
, u64 offset
)
1292 /* Check if we can read a header first */
1293 struct binder_object_header
*hdr
;
1294 size_t object_size
= 0;
1296 if (offset
> buffer
->data_size
- sizeof(*hdr
) ||
1297 buffer
->data_size
< sizeof(*hdr
) ||
1298 !IS_ALIGNED(offset
, sizeof(u32
)))
1301 /* Ok, now see if we can read a complete object. */
1302 hdr
= (struct binder_object_header
*)(buffer
->data
+ offset
);
1303 switch (hdr
->type
) {
1304 case BINDER_TYPE_BINDER
:
1305 case BINDER_TYPE_WEAK_BINDER
:
1306 case BINDER_TYPE_HANDLE
:
1307 case BINDER_TYPE_WEAK_HANDLE
:
1308 object_size
= sizeof(struct flat_binder_object
);
1310 case BINDER_TYPE_FD
:
1311 object_size
= sizeof(struct binder_fd_object
);
1313 case BINDER_TYPE_PTR
:
1314 object_size
= sizeof(struct binder_buffer_object
);
1316 case BINDER_TYPE_FDA
:
1317 object_size
= sizeof(struct binder_fd_array_object
);
1322 if (offset
<= buffer
->data_size
- object_size
&&
1323 buffer
->data_size
>= object_size
)
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
1346 static struct binder_buffer_object
*binder_validate_ptr(struct binder_buffer
*b
,
1347 binder_size_t index
,
1348 binder_size_t
*start
,
1349 binder_size_t num_valid
)
1351 struct binder_buffer_object
*buffer_obj
;
1352 binder_size_t
*offp
;
1354 if (index
>= num_valid
)
1357 offp
= start
+ index
;
1358 buffer_obj
= (struct binder_buffer_object
*)(b
->data
+ *offp
);
1359 if (buffer_obj
->hdr
.type
!= BINDER_TYPE_PTR
)
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
1403 static bool binder_validate_fixup(struct binder_buffer
*b
,
1404 binder_size_t
*objects_start
,
1405 struct binder_buffer_object
*buffer
,
1406 binder_size_t fixup_offset
,
1407 struct binder_buffer_object
*last_obj
,
1408 binder_size_t last_min_offset
)
1411 /* Nothing to fix up in */
1415 while (last_obj
!= buffer
) {
1417 * Safe to retrieve the parent of last_obj, since it
1418 * was already previously verified by the driver.
1420 if ((last_obj
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
) == 0)
1422 last_min_offset
= last_obj
->parent_offset
+ sizeof(uintptr_t);
1423 last_obj
= (struct binder_buffer_object
*)
1424 (b
->data
+ *(objects_start
+ last_obj
->parent
));
1426 return (fixup_offset
>= last_min_offset
);
1429 static void binder_transaction_buffer_release(struct binder_proc
*proc
,
1430 struct binder_buffer
*buffer
,
1431 binder_size_t
*failed_at
)
1433 binder_size_t
*offp
, *off_start
, *off_end
;
1434 int debug_id
= buffer
->debug_id
;
1436 binder_debug(BINDER_DEBUG_TRANSACTION
,
1437 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1438 proc
->pid
, buffer
->debug_id
,
1439 buffer
->data_size
, buffer
->offsets_size
, failed_at
);
1441 if (buffer
->target_node
)
1442 binder_dec_node(buffer
->target_node
, 1, 0);
1444 off_start
= (binder_size_t
*)(buffer
->data
+
1445 ALIGN(buffer
->data_size
, sizeof(void *)));
1447 off_end
= failed_at
;
1449 off_end
= (void *)off_start
+ buffer
->offsets_size
;
1450 for (offp
= off_start
; offp
< off_end
; offp
++) {
1451 struct binder_object_header
*hdr
;
1452 size_t object_size
= binder_validate_object(buffer
, *offp
);
1454 if (object_size
== 0) {
1455 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1456 debug_id
, (u64
)*offp
, buffer
->data_size
);
1459 hdr
= (struct binder_object_header
*)(buffer
->data
+ *offp
);
1460 switch (hdr
->type
) {
1461 case BINDER_TYPE_BINDER
:
1462 case BINDER_TYPE_WEAK_BINDER
: {
1463 struct flat_binder_object
*fp
;
1464 struct binder_node
*node
;
1466 fp
= to_flat_binder_object(hdr
);
1467 node
= binder_get_node(proc
, fp
->binder
);
1469 pr_err("transaction release %d bad node %016llx\n",
1470 debug_id
, (u64
)fp
->binder
);
1473 binder_debug(BINDER_DEBUG_TRANSACTION
,
1474 " node %d u%016llx\n",
1475 node
->debug_id
, (u64
)node
->ptr
);
1476 binder_dec_node(node
, hdr
->type
== BINDER_TYPE_BINDER
,
1479 case BINDER_TYPE_HANDLE
:
1480 case BINDER_TYPE_WEAK_HANDLE
: {
1481 struct flat_binder_object
*fp
;
1482 struct binder_ref
*ref
;
1484 fp
= to_flat_binder_object(hdr
);
1485 ref
= binder_get_ref(proc
, fp
->handle
,
1486 hdr
->type
== BINDER_TYPE_HANDLE
);
1488 pr_err("transaction release %d bad handle %d\n",
1489 debug_id
, fp
->handle
);
1492 binder_debug(BINDER_DEBUG_TRANSACTION
,
1493 " ref %d desc %d (node %d)\n",
1494 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
);
1495 binder_dec_ref(ref
, hdr
->type
== BINDER_TYPE_HANDLE
);
1498 case BINDER_TYPE_FD
: {
1499 struct binder_fd_object
*fp
= to_binder_fd_object(hdr
);
1501 binder_debug(BINDER_DEBUG_TRANSACTION
,
1502 " fd %d\n", fp
->fd
);
1504 task_close_fd(proc
, fp
->fd
);
1506 case BINDER_TYPE_PTR
:
1508 * Nothing to do here, this will get cleaned up when the
1509 * transaction buffer gets freed
1512 case BINDER_TYPE_FDA
: {
1513 struct binder_fd_array_object
*fda
;
1514 struct binder_buffer_object
*parent
;
1515 uintptr_t parent_buffer
;
1518 binder_size_t fd_buf_size
;
1520 fda
= to_binder_fd_array_object(hdr
);
1521 parent
= binder_validate_ptr(buffer
, fda
->parent
,
1525 pr_err("transaction release %d bad parent offset",
1530 * Since the parent was already fixed up, convert it
1531 * back to kernel address space to access it
1533 parent_buffer
= parent
->buffer
-
1534 proc
->user_buffer_offset
;
1536 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
1537 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
1538 pr_err("transaction release %d invalid number of fds (%lld)\n",
1539 debug_id
, (u64
)fda
->num_fds
);
1542 if (fd_buf_size
> parent
->length
||
1543 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
1544 /* No space for all file descriptors here. */
1545 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1546 debug_id
, (u64
)fda
->num_fds
);
1549 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
1550 for (fd_index
= 0; fd_index
< fda
->num_fds
; fd_index
++)
1551 task_close_fd(proc
, fd_array
[fd_index
]);
1554 pr_err("transaction release %d bad object type %x\n",
1555 debug_id
, hdr
->type
);
1561 static int binder_translate_binder(struct flat_binder_object
*fp
,
1562 struct binder_transaction
*t
,
1563 struct binder_thread
*thread
)
1565 struct binder_node
*node
;
1566 struct binder_ref
*ref
;
1567 struct binder_proc
*proc
= thread
->proc
;
1568 struct binder_proc
*target_proc
= t
->to_proc
;
1570 node
= binder_get_node(proc
, fp
->binder
);
1572 node
= binder_new_node(proc
, fp
->binder
, fp
->cookie
);
1576 node
->min_priority
= fp
->flags
& FLAT_BINDER_FLAG_PRIORITY_MASK
;
1577 node
->accept_fds
= !!(fp
->flags
& FLAT_BINDER_FLAG_ACCEPTS_FDS
);
1579 if (fp
->cookie
!= node
->cookie
) {
1580 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1581 proc
->pid
, thread
->pid
, (u64
)fp
->binder
,
1582 node
->debug_id
, (u64
)fp
->cookie
,
1586 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
))
1589 ref
= binder_get_ref_for_node(target_proc
, node
);
1593 if (fp
->hdr
.type
== BINDER_TYPE_BINDER
)
1594 fp
->hdr
.type
= BINDER_TYPE_HANDLE
;
1596 fp
->hdr
.type
= BINDER_TYPE_WEAK_HANDLE
;
1598 fp
->handle
= ref
->desc
;
1600 binder_inc_ref(ref
, fp
->hdr
.type
== BINDER_TYPE_HANDLE
, &thread
->todo
);
1602 trace_binder_transaction_node_to_ref(t
, node
, ref
);
1603 binder_debug(BINDER_DEBUG_TRANSACTION
,
1604 " node %d u%016llx -> ref %d desc %d\n",
1605 node
->debug_id
, (u64
)node
->ptr
,
1606 ref
->debug_id
, ref
->desc
);
1611 static int binder_translate_handle(struct flat_binder_object
*fp
,
1612 struct binder_transaction
*t
,
1613 struct binder_thread
*thread
)
1615 struct binder_ref
*ref
;
1616 struct binder_proc
*proc
= thread
->proc
;
1617 struct binder_proc
*target_proc
= t
->to_proc
;
1619 ref
= binder_get_ref(proc
, fp
->handle
,
1620 fp
->hdr
.type
== BINDER_TYPE_HANDLE
);
1622 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1623 proc
->pid
, thread
->pid
, fp
->handle
);
1626 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
))
1629 if (ref
->node
->proc
== target_proc
) {
1630 if (fp
->hdr
.type
== BINDER_TYPE_HANDLE
)
1631 fp
->hdr
.type
= BINDER_TYPE_BINDER
;
1633 fp
->hdr
.type
= BINDER_TYPE_WEAK_BINDER
;
1634 fp
->binder
= ref
->node
->ptr
;
1635 fp
->cookie
= ref
->node
->cookie
;
1636 binder_inc_node(ref
->node
, fp
->hdr
.type
== BINDER_TYPE_BINDER
,
1638 trace_binder_transaction_ref_to_node(t
, ref
);
1639 binder_debug(BINDER_DEBUG_TRANSACTION
,
1640 " ref %d desc %d -> node %d u%016llx\n",
1641 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
,
1642 (u64
)ref
->node
->ptr
);
1644 struct binder_ref
*new_ref
;
1646 new_ref
= binder_get_ref_for_node(target_proc
, ref
->node
);
1651 fp
->handle
= new_ref
->desc
;
1653 binder_inc_ref(new_ref
, fp
->hdr
.type
== BINDER_TYPE_HANDLE
,
1655 trace_binder_transaction_ref_to_ref(t
, ref
, new_ref
);
1656 binder_debug(BINDER_DEBUG_TRANSACTION
,
1657 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1658 ref
->debug_id
, ref
->desc
, new_ref
->debug_id
,
1659 new_ref
->desc
, ref
->node
->debug_id
);
1664 static int binder_translate_fd(int fd
,
1665 struct binder_transaction
*t
,
1666 struct binder_thread
*thread
,
1667 struct binder_transaction
*in_reply_to
)
1669 struct binder_proc
*proc
= thread
->proc
;
1670 struct binder_proc
*target_proc
= t
->to_proc
;
1674 bool target_allows_fd
;
1677 target_allows_fd
= !!(in_reply_to
->flags
& TF_ACCEPT_FDS
);
1679 target_allows_fd
= t
->buffer
->target_node
->accept_fds
;
1680 if (!target_allows_fd
) {
1681 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1682 proc
->pid
, thread
->pid
,
1683 in_reply_to
? "reply" : "transaction",
1686 goto err_fd_not_accepted
;
1691 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1692 proc
->pid
, thread
->pid
, fd
);
1696 ret
= security_binder_transfer_file(proc
->tsk
, target_proc
->tsk
, file
);
1702 target_fd
= task_get_unused_fd_flags(target_proc
, O_CLOEXEC
);
1703 if (target_fd
< 0) {
1705 goto err_get_unused_fd
;
1707 task_fd_install(target_proc
, target_fd
, file
);
1708 trace_binder_transaction_fd(t
, fd
, target_fd
);
1709 binder_debug(BINDER_DEBUG_TRANSACTION
, " fd %d -> %d\n",
1718 err_fd_not_accepted
:
1722 static int binder_translate_fd_array(struct binder_fd_array_object
*fda
,
1723 struct binder_buffer_object
*parent
,
1724 struct binder_transaction
*t
,
1725 struct binder_thread
*thread
,
1726 struct binder_transaction
*in_reply_to
)
1728 binder_size_t fdi
, fd_buf_size
, num_installed_fds
;
1730 uintptr_t parent_buffer
;
1732 struct binder_proc
*proc
= thread
->proc
;
1733 struct binder_proc
*target_proc
= t
->to_proc
;
1735 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
1736 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
1737 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1738 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
1741 if (fd_buf_size
> parent
->length
||
1742 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
1743 /* No space for all file descriptors here. */
1744 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1745 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
1749 * Since the parent was already fixed up, convert it
1750 * back to the kernel address space to access it
1752 parent_buffer
= parent
->buffer
- target_proc
->user_buffer_offset
;
1753 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
1754 if (!IS_ALIGNED((unsigned long)fd_array
, sizeof(u32
))) {
1755 binder_user_error("%d:%d parent offset not aligned correctly.\n",
1756 proc
->pid
, thread
->pid
);
1759 for (fdi
= 0; fdi
< fda
->num_fds
; fdi
++) {
1760 target_fd
= binder_translate_fd(fd_array
[fdi
], t
, thread
,
1763 goto err_translate_fd_failed
;
1764 fd_array
[fdi
] = target_fd
;
1768 err_translate_fd_failed
:
1770 * Failed to allocate fd or security error, free fds
1773 num_installed_fds
= fdi
;
1774 for (fdi
= 0; fdi
< num_installed_fds
; fdi
++)
1775 task_close_fd(target_proc
, fd_array
[fdi
]);
1779 static int binder_fixup_parent(struct binder_transaction
*t
,
1780 struct binder_thread
*thread
,
1781 struct binder_buffer_object
*bp
,
1782 binder_size_t
*off_start
,
1783 binder_size_t num_valid
,
1784 struct binder_buffer_object
*last_fixup_obj
,
1785 binder_size_t last_fixup_min_off
)
1787 struct binder_buffer_object
*parent
;
1789 struct binder_buffer
*b
= t
->buffer
;
1790 struct binder_proc
*proc
= thread
->proc
;
1791 struct binder_proc
*target_proc
= t
->to_proc
;
1793 if (!(bp
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
))
1796 parent
= binder_validate_ptr(b
, bp
->parent
, off_start
, num_valid
);
1798 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1799 proc
->pid
, thread
->pid
);
1803 if (!binder_validate_fixup(b
, off_start
,
1804 parent
, bp
->parent_offset
,
1806 last_fixup_min_off
)) {
1807 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1808 proc
->pid
, thread
->pid
);
1812 if (parent
->length
< sizeof(binder_uintptr_t
) ||
1813 bp
->parent_offset
> parent
->length
- sizeof(binder_uintptr_t
)) {
1814 /* No space for a pointer here! */
1815 binder_user_error("%d:%d got transaction with invalid parent offset\n",
1816 proc
->pid
, thread
->pid
);
1819 parent_buffer
= (u8
*)(parent
->buffer
-
1820 target_proc
->user_buffer_offset
);
1821 *(binder_uintptr_t
*)(parent_buffer
+ bp
->parent_offset
) = bp
->buffer
;
1826 static void binder_transaction(struct binder_proc
*proc
,
1827 struct binder_thread
*thread
,
1828 struct binder_transaction_data
*tr
, int reply
,
1829 binder_size_t extra_buffers_size
)
1832 struct binder_transaction
*t
;
1833 struct binder_work
*tcomplete
;
1834 binder_size_t
*offp
, *off_end
, *off_start
;
1835 binder_size_t off_min
;
1836 u8
*sg_bufp
, *sg_buf_end
;
1837 struct binder_proc
*target_proc
;
1838 struct binder_thread
*target_thread
= NULL
;
1839 struct binder_node
*target_node
= NULL
;
1840 struct list_head
*target_list
;
1841 wait_queue_head_t
*target_wait
;
1842 struct binder_transaction
*in_reply_to
= NULL
;
1843 struct binder_transaction_log_entry
*e
;
1844 uint32_t return_error
;
1845 struct binder_buffer_object
*last_fixup_obj
= NULL
;
1846 binder_size_t last_fixup_min_off
= 0;
1847 struct binder_context
*context
= proc
->context
;
1849 e
= binder_transaction_log_add(&binder_transaction_log
);
1850 e
->call_type
= reply
? 2 : !!(tr
->flags
& TF_ONE_WAY
);
1851 e
->from_proc
= proc
->pid
;
1852 e
->from_thread
= thread
->pid
;
1853 e
->target_handle
= tr
->target
.handle
;
1854 e
->data_size
= tr
->data_size
;
1855 e
->offsets_size
= tr
->offsets_size
;
1856 e
->context_name
= proc
->context
->name
;
1859 in_reply_to
= thread
->transaction_stack
;
1860 if (in_reply_to
== NULL
) {
1861 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1862 proc
->pid
, thread
->pid
);
1863 return_error
= BR_FAILED_REPLY
;
1864 goto err_empty_call_stack
;
1866 binder_set_nice(in_reply_to
->saved_priority
);
1867 if (in_reply_to
->to_thread
!= thread
) {
1868 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1869 proc
->pid
, thread
->pid
, in_reply_to
->debug_id
,
1870 in_reply_to
->to_proc
?
1871 in_reply_to
->to_proc
->pid
: 0,
1872 in_reply_to
->to_thread
?
1873 in_reply_to
->to_thread
->pid
: 0);
1874 return_error
= BR_FAILED_REPLY
;
1876 goto err_bad_call_stack
;
1878 thread
->transaction_stack
= in_reply_to
->to_parent
;
1879 target_thread
= in_reply_to
->from
;
1880 if (target_thread
== NULL
) {
1881 return_error
= BR_DEAD_REPLY
;
1882 goto err_dead_binder
;
1884 if (target_thread
->transaction_stack
!= in_reply_to
) {
1885 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1886 proc
->pid
, thread
->pid
,
1887 target_thread
->transaction_stack
?
1888 target_thread
->transaction_stack
->debug_id
: 0,
1889 in_reply_to
->debug_id
);
1890 return_error
= BR_FAILED_REPLY
;
1892 target_thread
= NULL
;
1893 goto err_dead_binder
;
1895 target_proc
= target_thread
->proc
;
1897 if (tr
->target
.handle
) {
1898 struct binder_ref
*ref
;
1900 ref
= binder_get_ref(proc
, tr
->target
.handle
, true);
1902 binder_user_error("%d:%d got transaction to invalid handle\n",
1903 proc
->pid
, thread
->pid
);
1904 return_error
= BR_FAILED_REPLY
;
1905 goto err_invalid_target_handle
;
1907 target_node
= ref
->node
;
1909 target_node
= context
->binder_context_mgr_node
;
1910 if (target_node
== NULL
) {
1911 return_error
= BR_DEAD_REPLY
;
1912 goto err_no_context_mgr_node
;
1915 e
->to_node
= target_node
->debug_id
;
1916 target_proc
= target_node
->proc
;
1917 if (target_proc
== NULL
) {
1918 return_error
= BR_DEAD_REPLY
;
1919 goto err_dead_binder
;
1921 if (security_binder_transaction(proc
->tsk
,
1922 target_proc
->tsk
) < 0) {
1923 return_error
= BR_FAILED_REPLY
;
1924 goto err_invalid_target_handle
;
1926 if (!(tr
->flags
& TF_ONE_WAY
) && thread
->transaction_stack
) {
1927 struct binder_transaction
*tmp
;
1929 tmp
= thread
->transaction_stack
;
1930 if (tmp
->to_thread
!= thread
) {
1931 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1932 proc
->pid
, thread
->pid
, tmp
->debug_id
,
1933 tmp
->to_proc
? tmp
->to_proc
->pid
: 0,
1935 tmp
->to_thread
->pid
: 0);
1936 return_error
= BR_FAILED_REPLY
;
1937 goto err_bad_call_stack
;
1940 if (tmp
->from
&& tmp
->from
->proc
== target_proc
)
1941 target_thread
= tmp
->from
;
1942 tmp
= tmp
->from_parent
;
1946 if (target_thread
) {
1947 e
->to_thread
= target_thread
->pid
;
1948 target_list
= &target_thread
->todo
;
1949 target_wait
= &target_thread
->wait
;
1951 target_list
= &target_proc
->todo
;
1952 target_wait
= &target_proc
->wait
;
1954 e
->to_proc
= target_proc
->pid
;
1956 /* TODO: reuse incoming transaction for reply */
1957 t
= kzalloc(sizeof(*t
), GFP_KERNEL
);
1959 return_error
= BR_FAILED_REPLY
;
1960 goto err_alloc_t_failed
;
1962 binder_stats_created(BINDER_STAT_TRANSACTION
);
1964 tcomplete
= kzalloc(sizeof(*tcomplete
), GFP_KERNEL
);
1965 if (tcomplete
== NULL
) {
1966 return_error
= BR_FAILED_REPLY
;
1967 goto err_alloc_tcomplete_failed
;
1969 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE
);
1971 t
->debug_id
= ++binder_last_id
;
1972 e
->debug_id
= t
->debug_id
;
1975 binder_debug(BINDER_DEBUG_TRANSACTION
,
1976 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1977 proc
->pid
, thread
->pid
, t
->debug_id
,
1978 target_proc
->pid
, target_thread
->pid
,
1979 (u64
)tr
->data
.ptr
.buffer
,
1980 (u64
)tr
->data
.ptr
.offsets
,
1981 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
1982 (u64
)extra_buffers_size
);
1984 binder_debug(BINDER_DEBUG_TRANSACTION
,
1985 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
1986 proc
->pid
, thread
->pid
, t
->debug_id
,
1987 target_proc
->pid
, target_node
->debug_id
,
1988 (u64
)tr
->data
.ptr
.buffer
,
1989 (u64
)tr
->data
.ptr
.offsets
,
1990 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
1991 (u64
)extra_buffers_size
);
1993 if (!reply
&& !(tr
->flags
& TF_ONE_WAY
))
1997 t
->sender_euid
= task_euid(proc
->tsk
);
1998 t
->to_proc
= target_proc
;
1999 t
->to_thread
= target_thread
;
2001 t
->flags
= tr
->flags
;
2002 t
->priority
= task_nice(current
);
2004 trace_binder_transaction(reply
, t
, target_node
);
2006 t
->buffer
= binder_alloc_buf(target_proc
, tr
->data_size
,
2007 tr
->offsets_size
, extra_buffers_size
,
2008 !reply
&& (t
->flags
& TF_ONE_WAY
));
2009 if (t
->buffer
== NULL
) {
2010 return_error
= BR_FAILED_REPLY
;
2011 goto err_binder_alloc_buf_failed
;
2013 t
->buffer
->allow_user_free
= 0;
2014 t
->buffer
->debug_id
= t
->debug_id
;
2015 t
->buffer
->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;

	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				target_proc->user_buffer_offset;
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
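
/*
 * binder_thread_write() consumes the BC_* command stream that userspace
 * supplied in the write half of a BINDER_WRITE_READ ioctl: each iteration
 * reads one command word, bumps the driver/process/thread command stats,
 * and dispatches to the matching handler until the buffer is exhausted or
 * a pending return error stops processing.
 */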
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && context->binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					context->binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else {
				ref = binder_get_ref(proc, target,
						     cmd == BC_ACQUIRE ||
						     cmd == BC_RELEASE);
				if (ref == NULL) {
					binder_user_error("%d:%d refcount change on invalid ref %d\n",
						proc->pid, thread->pid, target);
					break;
				}
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}

static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
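
/*
 * binder_thread_read() fills the read half of a BINDER_WRITE_READ ioctl:
 * it waits (unless O_NONBLOCK was set) for work on the thread or process
 * todo lists, encodes each work item as a BR_* return code, optionally
 * followed by a binder_transaction_data payload, and finally asks
 * userspace to spawn another looper thread when the pool runs dry.
 */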
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo);

	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}
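
/*
 * binder_ioctl_write_read() implements BINDER_WRITE_READ, the workhorse
 * ioctl: a single struct binder_write_read carries both an outbound
 * command buffer and an inbound return buffer, so one syscall can send
 * commands and then block for replies.  Roughly, a userspace caller
 * drives it like the sketch below (illustrative only, not part of the
 * driver; binder_fd and readbuf are placeholder names):
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *
 *	bwr.write_buffer = (binder_uintptr_t)&cmd;
 *	bwr.write_size = sizeof(cmd);
 *	bwr.read_buffer = (binder_uintptr_t)readbuf;
 *	bwr.read_size = sizeof(readbuf);
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, write_consumed and read_consumed report how much of each
 * buffer the kernel actually processed.
 */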
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;

	kuid_t curr_euid = current_euid();

	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (!context->binder_context_mgr_node) {
		ret = -ENOMEM;
		goto out;
	}
	context->binder_context_mgr_node->local_weak_refs++;
	context->binder_context_mgr_node->local_strong_refs++;
	context->binder_context_mgr_node->has_strong_ref = 1;
	context->binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	if (unlikely(current->mm != proc->vma_vm_mm)) {
		pr_err("current mm mismatch proc mm\n");
		return -EINVAL;
	}
	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
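
/*
 * binder_mmap() sets up the single per-process buffer area (capped at
 * 4MB): it reserves a matching kernel vm area, records the fixed offset
 * between the userspace mapping and the kernel address
 * (user_buffer_offset), allocates the page-pointer array, and seeds the
 * allocator with one free buffer spanning the whole mapping.  The mapping
 * is read-only for userspace; FORBIDDEN_MMAP_FLAGS rejects writable maps.
 */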
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	proc->files = get_files_struct(current);
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
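
/*
 * binder_open() creates the per-process binder_proc state for a client
 * opening one of the binder device nodes, binds it to that device's
 * context, links it into the global binder_procs list, and exposes a
 * per-PID entry under the binder debugfs proc directory.
 */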
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	proc->vma_vm_mm = current->mm;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
	}

	vfree(proc->buffer);

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
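
/*
 * Flush, release and put-files work is not done in the caller's context;
 * binder_defer_work() queues the proc on binder_deferred_list and
 * binder_deferred_func() later drains that list one proc at a time under
 * the main lock, dropping the files_struct reference only after the lock
 * is released.
 */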
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}

static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
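
/*
 * Each name listed in the "devices" module parameter (for example a list
 * such as "binder,hwbinder,vndbinder") gets its own misc device and its
 * own binder_context, so each device node is an isolated IPC namespace.
 */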
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");