/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
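
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * one of the flag-changing behaviors handled above, keeping a buffer
 * out of a child's address space so fork() does not COW pages pinned
 * for DMA by get_user_pages().  All names below are hypothetical.
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 1 << 20, MADV_DONTFORK);	// sets VM_DONTCOPY
 *	// ... fork(); the child does not inherit this mapping ...
 *	madvise(buf, 1 << 20, MADV_DOFORK);	// cancel the advice
 */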
145
1998cc04
SL
146#ifdef CONFIG_SWAP
147static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
148 unsigned long end, struct mm_walk *walk)
149{
150 pte_t *orig_pte;
151 struct vm_area_struct *vma = walk->private;
152 unsigned long index;
153
154 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
155 return 0;
156
157 for (index = start; index != end; index += PAGE_SIZE) {
158 pte_t pte;
159 swp_entry_t entry;
160 struct page *page;
161 spinlock_t *ptl;
162
163 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
164 pte = *(orig_pte + ((index - start) / PAGE_SIZE));
165 pte_unmap_unlock(orig_pte, ptl);
166
0661a336 167 if (pte_present(pte) || pte_none(pte))
1998cc04
SL
168 continue;
169 entry = pte_to_swp_entry(pte);
170 if (unlikely(non_swap_entry(entry)))
171 continue;
172
173 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
174 vma, index);
175 if (page)
09cbfeaf 176 put_page(page);
1998cc04
SL
177 }
178
179 return 0;
180}
181
182static void force_swapin_readahead(struct vm_area_struct *vma,
183 unsigned long start, unsigned long end)
184{
185 struct mm_walk walk = {
186 .mm = vma->vm_mm,
187 .pmd_entry = swapin_walk_pmd_entry,
188 .private = vma,
189 };
190
191 walk_page_range(start, end, &walk);
192
193 lru_add_drain(); /* Push any new pages onto the LRU now */
194}
195
196static void force_shm_swapin_readahead(struct vm_area_struct *vma,
197 unsigned long start, unsigned long end,
198 struct address_space *mapping)
199{
200 pgoff_t index;
201 struct page *page;
202 swp_entry_t swap;
203
204 for (; start < end; start += PAGE_SIZE) {
205 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
206
55231e5c 207 page = find_get_entry(mapping, index);
1998cc04
SL
208 if (!radix_tree_exceptional_entry(page)) {
209 if (page)
09cbfeaf 210 put_page(page);
1998cc04
SL
211 continue;
212 }
213 swap = radix_to_swp_entry(page);
214 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
215 NULL, 0);
216 if (page)
09cbfeaf 217 put_page(page);
1998cc04
SL
218 }
219
220 lru_add_drain(); /* Push any new pages onto the LRU now */
221}
222#endif /* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
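
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * hinting an upcoming scan of a mapped file.  madvise_willneed() above
 * turns this into asynchronous readahead on the file's mapping, so the
 * call returns without waiting for I/O.  "data.bin" is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	struct stat st;
 *	fstat(fd, &st);
 *	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, st.st_size, MADV_WILLNEED);	// schedule readahead
 *	// ... touch pages; most faults now hit the page cache ...
 *	munmap(p, st.st_size);
 *	close(fd);
 */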

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry: swapping the page back in would be more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			put_page(page);
			unlock_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, remap the pte as old|clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works only for anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
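
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * a memory allocator marking a freed anonymous range lazily
 * reclaimable.  Pages are dropped only under memory pressure, and
 * rewriting a page before then cancels the free, so no second
 * madvise() call is needed to reuse the memory.
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 64 << 20;
 *	char *heap = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// ... use heap, then stop needing its contents ...
 *	madvise(heap, len, MADV_FREE);	// contents now disposable
 *	heap[0] = 1;			// a write reclaims the page for reuse
 */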

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	madvise_userfault_dontneed(vma, prev, start, end);
	zap_page_range(vma, start, end - start);
	return 0;
}
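
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * unlike MADV_FREE above, MADV_DONTNEED drops the pages immediately;
 * the next touch of a private anonymous page faults in fresh zeroes.
 *
 *	#include <assert.h>
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 42;
 *	madvise(p, 4096, MADV_DONTNEED);	// zap_page_range() above
 *	assert(p[0] == 0);			// refault: zero-filled page
 */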

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
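
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * MADV_REMOVE on a shared, writable file mapping punches a hole in the
 * backing file, like fallocate(FALLOC_FL_PUNCH_HOLE).  It needs a
 * filesystem that supports hole punching; "sparse.dat" is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("sparse.dat", O_RDWR);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	madvise(p, 1 << 20, MADV_REMOVE);	// frees pages and disk blocks
 */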

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
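
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * a privileged error-injection test poisoning one of its own pages.
 * Requires CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE.
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;				// make sure the page exists
 *	madvise(p, 4096, MADV_HWPOISON);	// fake an uncorrected error
 *	// a subsequent access is expected to raise SIGBUS
 */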

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
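
/*
 * Example (userspace, illustrative sketch -- not part of this file):
 * the argument validation above in practice.  start must be
 * page-aligned; len is rounded up to a page multiple for the caller;
 * a zero-length range is a successful no-op.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p + 1, page, MADV_DONTNEED);	// -EINVAL: start unaligned
 *	madvise(p, page + 1, MADV_DONTNEED);	// ok: len rounds up to 2*page
 *	madvise(p, 0, MADV_DONTNEED);		// ok: end == start, no-op
 */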