// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise a copy
 * allocated with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
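
/*
 * Illustrative sketch, not part of the original file: the intended
 * kstrdup_const()/kfree_const() pairing.  The helper and its arguments are
 * hypothetical; the point is that a .rodata argument is returned as-is at
 * no allocation cost, while anything else is duplicated via kstrdup().
 */
static int __maybe_unused example_set_label(const char **label,
					    const char *name)
{
	const char *tmp = kstrdup_const(name, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	kfree_const(*label);	/* frees only if not in .rodata */
	*label = tmp;
	return 0;
}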

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
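
/*
 * Illustrative sketch, not part of the original file: kmemdup() is the
 * usual way to snapshot a caller-provided buffer so it can outlive the
 * caller's storage.  The structure and helper below are hypothetical.
 */
struct example_blob {
	void *data;
	size_t len;
};

static int __maybe_unused example_cache_blob(struct example_blob *blob,
					     const void *src, size_t len)
{
	blob->data = kmemdup(src, len, GFP_KERNEL);
	if (!blob->data)
		return -ENOMEM;
	blob->len = len;
	return 0;	/* blob->data is freed later with kfree() */
}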

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
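
/*
 * Illustrative sketch, not part of the original file: the canonical
 * memdup_user() pattern in an ioctl-style handler (the handler itself is
 * hypothetical).  Note the ERR_PTR()/PTR_ERR() convention: on failure
 * there is no buffer to free.
 */
static long __maybe_unused example_ioctl_copy(const void __user *uarg,
					      size_t len)
{
	void *kbuf = memdup_user(uarg, len);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* ... operate on the kernel-space copy in kbuf ... */

	kfree(kbuf);	/* memdup_user() results are kfree()d, not kvfree()d */
	return 0;
}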

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
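
/*
 * Illustrative sketch, not part of the original file: memdup_user_nul() is
 * the usual helper for procfs/sysfs-style write handlers that parse the
 * user buffer as a C string.  The handler below is hypothetical.
 */
static ssize_t __maybe_unused example_write(const char __user *ubuf,
					    size_t count)
{
	char *cmd = memdup_user_nul(ubuf, count);
	int val, ret;

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	ret = kstrtoint(strim(cmd), 0, &val);	/* safe: cmd is NUL-terminated */
	kfree(cmd);
	if (ret)
		return ret;

	/* ... apply val ... */
	return count;
}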

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		     struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
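
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * place a randomized mapping base with randomize_page(), mirroring
 * arch_randomize_brk() below.  The helper and the 1 MiB window are
 * arbitrary choices for the example.
 */
static unsigned long __maybe_unused example_randomize_base(unsigned long base)
{
	if (!(current->flags & PF_RANDOMIZE))
		return base;
	return randomize_page(base, SZ_1M);
}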

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
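
/*
 * Illustrative sketch, not part of the original file: the pin/unpin
 * bookkeeping done by drivers that long-term pin user pages (vfio and
 * io_uring do variants of this).  The wrappers are hypothetical.  The
 * increment is charged against RLIMIT_MEMLOCK and can fail; the matching
 * decrement cannot.
 */
static int __maybe_unused example_pin_account(struct mm_struct *mm,
					      unsigned long npages)
{
	return account_locked_vm(mm, npages, true);
}

static void __maybe_unused example_unpin_account(struct mm_struct *mm,
						 unsigned long npages)
{
	account_locked_vm(mm, npages, false);
}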

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
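
/*
 * Illustrative sketch, not part of the original file: mapping the first
 * @size bytes of a file into the calling process, roughly what callers
 * such as binfmt loaders do.  vm_mmap() encodes a negative errno in the
 * returned address, hence IS_ERR_VALUE().  The helper is hypothetical.
 */
static unsigned long __maybe_unused example_map_file(struct file *file,
						     size_t size)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		pr_debug("example: vm_mmap failed: %ld\n", (long)addr);
	return addr;
}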

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note: requests with gfp flags that are not GFP_KERNEL-compatible
 * deliberately do not fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables) so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			      __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
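
/*
 * Illustrative sketch, not part of the original file: the classic
 * kvmalloc()/kvfree() pattern for a table whose size is user-controlled
 * and may exceed what kmalloc can reasonably satisfy.  The helper is
 * hypothetical; kvmalloc_array() adds overflow-checked sizing on top of
 * kvmalloc_node() above.
 */
static int __maybe_unused example_build_table(size_t nr_entries)
{
	u64 *table;

	table = kvmalloc_array(nr_entries, sizeof(*table),
			       GFP_KERNEL | __GFP_ZERO);
	if (!table)
		return -ENOMEM;

	/* ... fill and use the table ... */

	kvfree(table);	/* correct for both the kmalloc and vmalloc cases */
	return 0;
}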

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
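
/*
 * Illustrative sketch, not part of the original file: growing a
 * kvmalloc'ed buffer with kvrealloc().  Unlike krealloc(), the old size
 * must be passed in, and on failure the old buffer is left intact, so the
 * caller must keep its original pointer.  The helper is hypothetical.
 */
static void * __maybe_unused example_grow(void *buf, size_t oldsize,
					  size_t newsize)
{
	void *tmp = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);

	if (!tmp)
		return buf;	/* old buffer still valid; kvfree() it eventually */
	return tmp;		/* old buffer already freed by kvrealloc() */
}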

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
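
/*
 * Illustrative sketch, not part of the original file: vcalloc() gives
 * calloc()-style, overflow-checked sizing for virtually contiguous
 * memory, avoiding open-coded "n * size" multiplications (a classic
 * overflow bug).  The helper is hypothetical; free the result with
 * vfree().
 */
static struct page ** __maybe_unused example_alloc_page_array(size_t nr_pages)
{
	/* returns NULL on allocation failure *or* multiplication overflow */
	return vcalloc(nr_pages, sizeof(struct page *));
}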

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

void copy_huge_page(struct page *dst, struct page *src)
{
	unsigned i, nr = compound_nr(src);

	for (i = 0; i < nr; i++) {
		cond_resched();
		copy_highpage(nth_page(dst, i), nth_page(src, i));
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline, not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
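
/*
 * Illustrative sketch, not part of the original file: mem_dump_obj() uses
 * pr_cont(), so callers print their own preamble first.  The wrapper is
 * hypothetical; with !CONFIG_PRINTK the header provides a stub.
 */
static void __maybe_unused example_report_object(void *obj)
{
	pr_info("example: unexpected object %px", obj);
	mem_dump_obj(obj);	/* appends e.g. " slab kmalloc-64 ..." */
}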

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() are used by drivers that care
 * about such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
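
/*
 * Illustrative sketch, not part of the original file: the PFN-walker side
 * of the protocol above, modelled on what /proc/kcore does.  While the
 * walker holds page_offline_freeze(), no page can newly become
 * PageOffline(), so the check and the copy cannot race with a driver
 * offlining the page.  The helper is hypothetical and assumes a !highmem
 * page.
 */
static void __maybe_unused example_read_page(struct page *page, void *buf)
{
	page_offline_freeze();
	if (!PageOffline(page))
		memcpy(buf, page_to_virt(page), PAGE_SIZE);
	page_offline_thaw();
}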