/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
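
/*
 * Example (illustrative only, not part of the original file): with
 * debug_mask set so that BINDER_DEBUG_BUFFER_ALLOC is enabled (e.g.
 * debug_mask=4 via the module parameter), a call such as
 *
 *	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 *			   "%d: example event\n", alloc->pid);
 *
 * is emitted through pr_info(); with the bit clear, the branch is
 * masked off and produces no output.
 */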

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
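
/*
 * Illustration (added commentary): buffers are carved out of one
 * contiguous mapping, so a buffer's size is implied by its neighbour
 * rather than stored. With alloc->buffer_size = 32 and two buffers
 * whose data starts at offsets 0 and 24:
 *
 *	size(buf@0)  = 24 - 0  = 24	(next buffer's data - own data)
 *	size(buf@24) = 32 - 24 = 8	(end of mapping - own data)
 */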

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
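
/*
 * Usage sketch (illustrative, not part of this file): when the driver
 * core handles a BC_FREE_BUFFER command carrying a user-space data
 * pointer, it would look the buffer up and then release it:
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, data_ptr);
 *	if (!buffer)
 *		return;		<- unknown pointer or double free
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 */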

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
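
/*
 * Note on the error labels above (added commentary): the err_* labels
 * sit inside the free_range loop deliberately. A failure while setting
 * up the page at page_addr jumps to the matching unwind step for that
 * page, and the loop then continues downwards from
 * page_addr - PAGE_SIZE, returning every earlier page in
 * [start, page_addr) to binder_alloc_lru before falling through to
 * err_no_vma.
 */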

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures
	 * are set up completely. See the pairing smp_rmb() in
	 * binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always
	 * visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}
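
/*
 * Commentary (added): these two helpers implement a publish/subscribe
 * pattern. The writer orders its stores:
 *
 *	alloc->vma_vm_mm = vma->vm_mm;	(1) publish the payload
 *	smp_wmb();
 *	alloc->vma = vma;		(2) publish the flag
 *
 * and the reader orders its loads with the pairing smp_rmb(), so any
 * reader that observes a non-NULL alloc->vma is guaranteed to also
 * observe the vma_vm_mm value written before it.
 */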

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or an ERR_PTR() on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
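
/*
 * Usage sketch (illustrative only): a transaction path would allocate
 * its buffer, check for an error pointer (not NULL), fill it in, and
 * later release it:
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      extra_buffers_size, is_async);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 */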

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
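
/*
 * Caller sketch (illustrative, assuming the usual binder.c flow):
 * binder_mmap() validates and sizes the vma, sets vm_flags and vm_ops,
 * and then hands it here:
 *
 *	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 *
 * After a successful return, binder_alloc_new_buf() may carve buffers
 * out of the freshly mapped region.
 */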

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru that holds the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = binder_alloc_get_vma(alloc);
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_alloc_lru);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
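
/*
 * Commentary (added): under memory pressure the core shrinker code
 * first calls .count_objects to learn how many reclaimable pages sit
 * on binder_alloc_lru, then calls .scan_objects, which walks the LRU
 * and invokes binder_alloc_free_page() on entries until sc->nr_to_scan
 * pages have been examined; LRU_REMOVED_RETRY / LRU_SKIP returns from
 * the callback steer that walk.
 */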

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

void binder_alloc_shrinker_init(void)
{
	list_lru_init(&binder_alloc_lru);
	register_shrinker(&binder_shrinker);
}