/* mm/util.c */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

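/*
 * Example (illustrative sketch, not part of this file): a caller that
 * stores a name which may point at a string literal in .rodata. The
 * struct my_object type and helpers are hypothetical.
 *
 *	struct my_object {
 *		const char *name;
 *	};
 *
 *	static int my_object_set_name(struct my_object *obj, const char *name)
 *	{
 *		obj->name = kstrdup_const(name, GFP_KERNEL);
 *		return obj->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void my_object_release(struct my_object *obj)
 *	{
 *		kfree_const(obj->name);
 *	}
 *
 * Passing a literal such as "default" stores the .rodata pointer with no
 * allocation; any other string is duplicated and freed normally.
 */
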
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

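/*
 * Example (illustrative sketch): duplicating a bounded prefix of a string.
 * The result is always NUL-terminated even when truncation occurs.
 *
 *	const char *src = "very-long-interface-name";
 *	char *name = kstrndup(src, 8, GFP_KERNEL);	// name == "very-lon"
 *	...
 *	kfree(name);
 */
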
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

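/*
 * Example (illustrative sketch): taking a private copy of a caller-supplied
 * template structure. struct my_config and my_config_clone are hypothetical.
 *
 *	static struct my_config *my_config_clone(const struct my_config *tmpl)
 *	{
 *		return kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
 *	}
 *
 * The caller owns the copy and frees it with kfree().
 */
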
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

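/*
 * Example (illustrative sketch): an ioctl handler copying in a user buffer.
 * Note the IS_ERR()/PTR_ERR() convention rather than a NULL check. The
 * MY_IOC_SET case, argp, and my_apply() are hypothetical.
 *
 *	case MY_IOC_SET: {
 *		struct my_args *args = memdup_user(argp, sizeof(*args));
 *
 *		if (IS_ERR(args))
 *			return PTR_ERR(args);
 *		ret = my_apply(args);
 *		kfree(args);
 *		return ret;
 *	}
 */
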
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

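/*
 * Example (illustrative sketch): fetching a NUL-terminated path from user
 * space with a sanity bound. The PATH_MAX cap and user_path pointer are
 * illustrative.
 *
 *	char *path = strndup_user(user_path, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 *
 * strnlen_user() counts the trailing NUL, so a string that fits within @n
 * bytes including its terminator is accepted; anything longer is -EINVAL.
 */
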
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

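/*
 * Example (illustrative sketch): a procfs write handler that needs the user
 * buffer as a C string, e.g. to hand it to a parser. my_proc_write and
 * my_parse are hypothetical.
 *
 *	static ssize_t my_proc_write(struct file *file,
 *				     const char __user *buf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(buf, count);
 *		int ret;
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		ret = my_parse(kbuf);
 *		kfree(kbuf);
 *		return ret < 0 ? ret : count;
 *	}
 */
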
/*
 * Link @vma into @mm's address-ordered VMA list after @prev (or at the
 * head of the list), deriving the successor from the rbtree parent when
 * there is no predecessor.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is true, check the entire thread group; otherwise just
 * check the current task. Returns the task_struct of the task that the
 * vma is stack for. Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
				struct vm_area_struct *vma, bool in_group)
{
	if (vm_is_stack_for_task(task, vma))
		return task;

	if (in_group) {
		struct task_struct *t;

		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma))
				return t;
		}
	}

	return NULL;
}

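/*
 * Example (illustrative sketch): callers must hold the RCU read lock while
 * the thread group is walked.
 *
 *	rcu_read_lock();
 *	t = task_of_stack(task, vma, true);
 *	if (t)
 *		...	// vma is the stack of thread t
 *	rcu_read_unlock();
 */
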
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	return get_user_pages_unlocked(current, mm, start, nr_pages,
				       write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

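/*
 * Example (illustrative sketch): pinning a user buffer and releasing the
 * pages afterwards. A partial pin (got < requested) must be released too.
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, 16, 1, pages);
 *	if (got < 0)
 *		return got;
 *	...	// operate on the pinned pages
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */
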
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

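/*
 * Example (illustrative sketch): an in-kernel mmap of a file, as a driver
 * might perform. The filp and size variables are illustrative.
 *
 *	unsigned long uaddr;
 *
 *	uaddr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(uaddr))
 *		return (long)uaddr;
 *
 * The overflow check above rejects offset/len combinations whose
 * page-aligned end would wrap past the top of the address space.
 */
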
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

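/*
 * Example (illustrative sketch): the allocation pattern kvfree() pairs
 * with, i.e. try kmalloc() first and fall back to vmalloc() for large
 * buffers.
 *
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);	// correct for either allocator
 */
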
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;
	return mapping;
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/*
 * The overcommit_ratio and overcommit_kbytes sysctls are mutually
 * exclusive: writing one clears the other.
 */
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

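/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SHIFT - 10 == 2,
 * so overcommit_kbytes is shifted right by 2 (divided by 4) to convert
 * KiB to pages. On a machine with 8 GiB of RAM (2097152 pages), no huge
 * pages, 2 GiB of swap (524288 pages) and the default overcommit_ratio
 * of 50:
 *
 *	allowed = 2097152 * 50 / 100 + 524288 = 1572864 pages (6 GiB)
 */
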
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
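
/*
 * Example (illustrative sketch): snapshotting a task's command line.
 * Because the result is not guaranteed to be NUL-terminated, terminate
 * it explicitly before treating it as a C string.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 *	pr_debug("cmdline: %s\n", buf);
 */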