mm/util.c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section,
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
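
/*
 * Example (hypothetical, not part of this file): the typical pairing of
 * kstrdup_const() with kfree_const(). String literals live in .rodata,
 * so no allocation happens for them; only genuinely dynamic strings are
 * duplicated and must later be freed:
 *
 *	name = kstrdup_const(src, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */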

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
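
/*
 * Example (hypothetical): callers must check the result with IS_ERR()
 * rather than against NULL, since memdup_user() reports failure through
 * ERR_PTR():
 *
 *	buf = memdup_user(ubuf, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */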

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
			       int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
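
/*
 * Example (hypothetical): pinning a user buffer for short-lived kernel
 * access. Every page actually pinned must be released with put_page()
 * once the caller is done with it:
 *
 *	npages = get_user_pages_fast(uaddr, nr, 1, pages);
 *	if (npages < 0)
 *		return npages;
 *	...
 *	for (i = 0; i < npages; i++)
 *		put_page(pages[i]);
 */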

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
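
/*
 * Example (hypothetical): mapping the first page of a file read/write
 * into the current task's address space; errors come back encoded in
 * the returned address:
 *
 *	addr = vm_mmap(file, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */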

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
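
/*
 * Example (hypothetical): kvfree() pairs with allocations that may have
 * come from either kmalloc() or vmalloc(), e.g. a size-dependent
 * fallback such as kvmalloc() in trees that provide it:
 *
 *	p = kvmalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kvfree(p);
 */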

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any sub-page of the compound
 * page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
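
/*
 * Example (hypothetical): a NULL return from page_mapping() tells the
 * caller the page has no backing address_space, i.e. it is anonymous
 * (and not in the swap cache) or a slab page:
 *
 *	mapping = page_mapping(page);
 *	if (!mapping)
 *		return;
 */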

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
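
/*
 * Example (illustrative): the two knobs above are mutually exclusive;
 * writing one clears the other, e.g. from userspace:
 *
 *	# echo 1048576 > /proc/sys/vm/overcommit_kbytes   (ratio becomes 0)
 *	# echo 50 > /proc/sys/vm/overcommit_ratio         (kbytes becomes 0)
 */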

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
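
/*
 * Worked example (illustrative numbers): with 4 KiB pages, 1048576
 * pages of RAM (4 GiB), no hugetlb pages, sysctl_overcommit_ratio = 50
 * and 524288 pages (2 GiB) of swap:
 *
 *	allowed = 1048576 * 50 / 100 + 524288 = 1048576 pages (4 GiB)
 *
 * If sysctl_overcommit_kbytes is set instead, it takes precedence and
 * is simply converted from KiB to pages before swap is added.
 */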

/*
 * Make sure vm_committed_as is in its own cacheline and is not shared
 * with other variables: it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case: they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave out the reserved pages; they are not available
		 * to anonymous mappings.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
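
/*
 * Example (hypothetical sketch of the LSM usage mentioned above): a
 * security module hook typically computes cap_sys_admin itself and then
 * defers to this helper; my_vm_enough_memory() is a placeholder name:
 *
 *	static int my_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		int cap_sys_admin = capable(CAP_SYS_ADMIN) ? 1 : 0;
 *
 *		return __vm_enough_memory(mm, pages, cap_sys_admin);
 *	}
 */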

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *	to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
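
/*
 * Example (hypothetical): since the copy is not guaranteed to be
 * NUL-terminated, callers terminate it themselves:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */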