/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
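/*
 * The merge-first strategy below keeps the vma tree compact: we first try
 * to fold the advised range into a neighbouring vma that already carries
 * the new flags, and only split at start/end when no merge is possible,
 * so the flag change never leaks outside [start, end).
 */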
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_SWAP
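/*
 * Walk the ptes under one pmd and start asynchronous swap-in for every
 * entry that is swapped out, so the pages are already in the swap cache
 * by the time the application actually touches them.
 */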
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

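/*
 * Swap in the anonymous pages covering [start, end) by walking the page
 * tables of @vma with swapin_walk_pmd_entry() above.
 */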
static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

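/*
 * Shmem/tmpfs pages that have been swapped out are tracked in the
 * mapping's radix tree rather than in the process page tables, so look
 * each offset up there instead: an exceptional entry found in the tree
 * is a swap entry that we can read back in.
 */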
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
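/*
 * MADV_HWPOISON injects a hard memory failure on each page in the range,
 * as if the hardware had reported it; MADV_SOFT_OFFLINE instead migrates
 * the contents away and takes the page out of service without killing
 * anything.  Both are CAP_SYS_ADMIN-only testing hooks.
 */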
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

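/*
 * Dispatch one vma's worth of advice to the right handler.  Anything not
 * listed here changes vma->vm_flags and goes through madvise_behavior().
 */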
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

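/*
 * Reject behavior values that this kernel does not implement (or was not
 * configured to implement) before doing any work.
 */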
static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range with
 *		transparent huge pages, both for existing and future pages.
 *  MADV_NOHUGEPAGE - mark the given range as not worth collapsing into
 *		transparent huge pages, so khugepaged skips it.
 *  MADV_DONTDUMP - exclude the given range from any core dump of the process.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: include the range in core dumps again.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
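/*
 * Example (userspace, illustrative only): map a file, hint that it will
 * be read sequentially, then tell the kernel the pages can be reclaimed
 * once we are done with them:
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED) {
 *		madvise(buf, len, MADV_SEQUENTIAL);
 *		... read the data ...
 *		madvise(buf, len, MADV_DONTNEED);
 *		munmap(buf, len);
 *	}
 */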
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}