// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
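
/*
 * Example (illustrative only, not part of this file): a typical kstrdup()
 * pattern in driver code.  The names set_label() and dev_label are
 * hypothetical.
 *
 *	static char *dev_label;
 *
 *	static int set_label(const char *name)
 *	{
 *		char *copy = kstrdup(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(dev_label);
 *		dev_label = copy;
 *		return 0;
 *	}
 */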

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in the .rodata section, otherwise a
 * kstrdup() copy.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
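
/*
 * Example (illustrative only): kstrdup_const() pairs with kfree_const(),
 * so string literals are never copied or freed.  The struct and field
 * names here are hypothetical.
 *
 *	obj->name = kstrdup_const(name, GFP_KERNEL);	// no copy for literals
 *	...
 *	kfree_const(obj->name);				// no-op for .rodata strings
 */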

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
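
/*
 * Example (illustrative only): turning a counted, possibly unterminated
 * byte range into a C string.  kmemdup_nul() copies exactly @len bytes,
 * whereas kstrndup() would first scan for a NUL with strnlen().  The
 * buf/buf_len input below is hypothetical.
 *
 *	char *str = kmemdup_nul(buf, buf_len, GFP_KERNEL);
 *
 *	if (!str)
 *		return -ENOMEM;
 */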

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
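
/*
 * Example (illustrative only): unlike the k*dup() helpers above,
 * memdup_user() never returns NULL, so callers must test with IS_ERR().
 * uptr/len below are hypothetical.
 *
 *	void *buf = memdup_user(uptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */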

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be physically
 * contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
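
/*
 * Example (illustrative only): vmemdup_user() suits user-sized inputs
 * that may be too large for a contiguous kmalloc().  Free with kvfree(),
 * never kfree(), since the buffer may come from vmalloc.  uarg/size are
 * hypothetical.
 *
 *	void *tbl = vmemdup_user(uarg, size);
 *
 *	if (IS_ERR(tbl))
 *		return PTR_ERR(tbl);
 *	...
 *	kvfree(tbl);
 */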

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
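
/*
 * Example (illustrative only): a common memdup_user_nul() caller is a
 * procfs/sysfs-style write handler that wants the user buffer as a
 * C string.  The handler shown here is hypothetical.
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		// ... parse kbuf as a NUL-terminated string ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */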

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
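
/*
 * Example (illustrative only): an in-kernel mmap of a file, as a driver
 * might do.  The overflow check above rejects an @offset + @len that
 * wraps; e.g. with 4K pages, offset = -4096UL and len = 8192 wraps
 * around zero and is refused.  file/size below are hypothetical.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */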

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that for gfp flags that are not a superset of GFP_KERNEL this
 * function will not fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks, and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests.
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
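
/*
 * Example (illustrative only): the kvmalloc()/kvfree() pattern for an
 * allocation whose size is user-controlled and may exceed what kmalloc
 * can satisfy contiguously.  struct item and nr are hypothetical.
 *
 *	struct item *arr = kvmalloc(array_size(nr, sizeof(*arr)), GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	kvfree(arr);	// correct for both kmalloc and vmalloc memory
 */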

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound pages it returns true if any sub-page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
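
/*
 * Worked example (illustrative numbers): with 4K pages, 4 GiB of RAM
 * (1048576 pages), no hugetlb pages, the default overcommit_ratio of 50
 * and 524288 pages (2 GiB) of swap, the limit is
 *
 *	1048576 * 50 / 100 + 524288 = 1048576 pages = 4 GiB
 *
 * of committable address space under OVERCOMMIT_NEVER.
 */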

/*
 * Make sure vm_committed_as is on its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
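
/*
 * Example (illustrative only): how a /proc-style reader might use
 * get_cmdline().  Since the result is not guaranteed to be
 * NUL-terminated, terminate it explicitly.  tsk is hypothetical.
 *
 *	char buf[256];
 *	int n = get_cmdline(tsk, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */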