#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in .rodata section, otherwise
 * it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

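/*
 * Usage sketch (hypothetical caller, not part of this file): a pointer
 * obtained from kstrdup_const() may point into .rodata, so it must be
 * released with kfree_const(), never plain kfree():
 *
 *	const char *name = kstrdup_const("default", GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 *
 * Here the string literal lives in .rodata, so no allocation happens at
 * all and kfree_const() is a no-op.
 */
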
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

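/*
 * Usage sketch (hypothetical caller; struct foo_config, default_config and
 * FOO_ENABLED are illustrative names): duplicate a template before
 * modifying the copy. kmemdup() returns NULL on allocation failure:
 *
 *	struct foo_config *cfg;
 *
 *	cfg = kmemdup(&default_config, sizeof(*cfg), GFP_KERNEL);
 *	if (!cfg)
 *		return -ENOMEM;
 *	cfg->flags |= FOO_ENABLED;
 */
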
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

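/*
 * Usage sketch (hypothetical ioctl handler): unlike the k*dup helpers
 * above, failure is reported via ERR_PTR(), not NULL, so callers must use
 * IS_ERR()/PTR_ERR():
 *
 *	struct foo_args *args;
 *
 *	args = memdup_user(uarg, sizeof(*args));
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */
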
/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

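/*
 * Usage sketch (hypothetical procfs write handler): @count bytes of user
 * data become a NUL-terminated kernel string, so string helpers can be
 * applied to the copy safely:
 *
 *	char *kbuf;
 *
 *	kbuf = memdup_user_nul(ubuf, count);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = kstrtoul(kbuf, 0, &val);
 *	kfree(kbuf);
 */
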
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

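/*
 * Pointer picture for the common case above (prev != NULL), before and
 * after linking @vma (illustrative sketch):
 *
 *	before:  prev <-> next
 *	after:   prev <-> vma <-> next
 *
 * When @prev is NULL, @vma becomes the new list head (mm->mmap) and its
 * successor, if any, is recovered from the rbtree parent node.
 */
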
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

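/*
 * Usage sketch (hypothetical driver path; uaddr and NR are illustrative):
 * pin a user buffer for writing, operate on the pages, then drop the
 * references. Every successfully pinned page must be released with
 * put_page():
 *
 *	struct page *pages[NR];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, NR, 1, pages);
 *	if (got < 0)
 *		return got;
 *	... use pages[0..got-1] ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */
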
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

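/*
 * The two checks above reject byte offsets that are not page-aligned or
 * that would overflow once @len is rounded up to a page boundary. A sketch
 * of an in-kernel caller (illustrative; errors come back encoded in the
 * returned address):
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */
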
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

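/*
 * kvfree() is the release side of the common "try kmalloc, fall back to
 * vmalloc" pattern. A minimal allocation sketch (this file provides no
 * kvmalloc() helper at this point, so the fallback is open-coded):
 *
 *	p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!p)
 *		p = vmalloc(size);
 *	...
 *	kvfree(p);
 */
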
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

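/*
 * The two handlers above make vm.overcommit_ratio and vm.overcommit_kbytes
 * mutually exclusive: writing one zeroes the other, so at most one is in
 * effect at any time. From userspace (illustrative shell session):
 *
 *	# echo 80 > /proc/sys/vm/overcommit_ratio        (kbytes becomes 0)
 *	# echo 1048576 > /proc/sys/vm/overcommit_kbytes  (ratio becomes 0)
 */
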
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

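/*
 * Worked example (illustrative numbers): with 4KB pages, 4GB of RAM
 * (totalram_pages = 1048576), no hugetlb pages, the default ratio of 50,
 * and 1GB of swap (total_swap_pages = 262144):
 *
 *	allowed = 1048576 * 50 / 100 + 262144
 *	        = 524288 + 262144 = 786432 pages = 3GB
 *
 * This is the CommitLimit value reported in /proc/meminfo.
 */
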
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}