/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

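/*
 * Buffers are laid out back to back in the mapped area, so a buffer's size
 * is the distance from its data pointer to the next buffer's data pointer,
 * or to the end of the area for the last buffer in the list.
 */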
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

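/*
 * Free buffers are kept in an rbtree ordered by size so that
 * binder_alloc_new_buf_locked() can do a best-fit search.
 */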
static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

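/*
 * Allocated buffers are kept in a second rbtree ordered by their data
 * address; binder_alloc_prepare_to_free_locked() walks it to find the
 * buffer that userspace is handing back.
 */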
static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

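/*
 * Userspace only ever sees a buffer's address within its own mapping;
 * subtracting alloc->user_buffer_offset converts that address back to
 * the kernel-side pointer stored in the allocated-buffers tree.
 */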
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer twice
                         */
                        if (buffer->free_in_progress) {
                                pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                                       alloc->pid, current->pid, (u64)user_ptr);
                                return NULL;
                        }
                        buffer->free_in_progress = 1;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

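/*
 * Back (allocate != 0) or release (allocate == 0) the physical pages
 * covering the range [start, end) of the buffer area. Released pages are
 * parked on the shrinker LRU rather than being freed immediately.
 */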
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

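        /*
         * Walk the range page by page: pages that are still resident are
         * pulled off the shrinker LRU so they cannot be reclaimed while in
         * use; missing pages are allocated and mapped into both the kernel
         * area and the task's user VMA.
         */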
        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

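        /*
         * Pages being freed are not unmapped right away; each one is put on
         * the global binder_alloc_lru so the shrinker can reclaim it later,
         * which keeps hot pages mapped across transactions.
         */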
free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                                                  size_t data_size,
                                                  size_t offsets_size,
                                                  size_t extra_buffers_size,
                                                  int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

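        /*
         * Best-fit search: walk the size-ordered free tree, remembering the
         * smallest buffer that is still large enough. An exact match ends
         * the search early, leaving n non-NULL.
         */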
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                       alloc->pid, size);
                pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                       total_alloc_size, allocated_buffers, largest_alloc_size,
                       total_free_size, free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

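        /*
         * If the chosen free buffer is larger than requested, split off the
         * tail as a new free buffer so the remainder stays allocatable.
         */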
        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

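/*
 * Drop a free buffer's bookkeeping struct. The page holding its start is
 * released only if neither the previous nor the following buffer still has
 * data on that page.
 */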
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid, buffer->data, next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}

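/*
 * Return a buffer to the free tree, handing its now-unused pages back to
 * the LRU and coalescing with a free neighbour on either side so the area
 * does not fragment into unusably small free buffers.
 */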
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                                   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}

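/*
 * Tear down the allocator when the binder_proc is released: free every
 * remaining buffer, pull all pages off the LRU, unmap and free them, and
 * release the kernel virtual area.
 */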
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK %s\n",
                                           __func__, alloc->pid, i, page_addr,
                                           on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru the item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = alloc->vma;
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}

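/*
 * Shrinker glue: count reports how many reclaimable pages are sitting on
 * binder_alloc_lru, and scan walks that list freeing them through
 * binder_alloc_free_page().
 */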
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);

        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

void binder_alloc_shrinker_init(void)
{
        list_lru_init(&binder_alloc_lru);
        register_shrinker(&binder_shrinker);
}