// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
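
/*
 * Illustrative user-space sketch (an editor's assumption for exposition,
 * not part of this file): advising only the interior of a mapping
 * exercises the __split_vma() paths above, visible as extra lines for
 * the same region in /proc/self/maps.
 *
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, 16 * page, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return EXIT_FAILURE;
 *		// Change vm_flags on the middle pages only; the kernel
 *		// splits the original VMA into three.
 *		if (madvise(p + 4 * page, 8 * page, MADV_DONTDUMP))
 *			return EXIT_FAILURE;
 *		munmap(p, 16 * page);
 *		return 0;
 *	}
 */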

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
	get_file(file);
	up_read(&current->mm->mmap_sem);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	down_read(&current->mm->mmap_sem);
	return 0;
}
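
/*
 * Illustrative user-space sketch (an editor's assumption, not part of
 * this file): request asynchronous readahead on a mapped file before a
 * latency-sensitive scan. madvise(MADV_WILLNEED) returns once the I/O
 * is scheduled; it does not wait for the pages to arrive.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	// Map "path" read-only and hint that all of it will be needed soon.
 *	static void *map_and_prefetch(const char *path, size_t *lenp)
 *	{
 *		struct stat st;
 *		void *p;
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0 || fstat(fd, &st) < 0) {
 *			if (fd >= 0)
 *				close(fd);
 *			return NULL;
 *		}
 *		p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *		close(fd);	// the mapping holds its own file reference
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		madvise(p, st.st_size, MADV_WILLNEED);	// schedule readahead
 *		*lenp = st.st_size;
 *		return p;
 *	}
 */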

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page
		 * table entry: swapping the page back in would be more
		 * expensive than page allocation plus zeroing.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB with set_pte_at() and tlb_remove_tlb_entry(),
			 * so for portability remap the pte as old and
			 * clean after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	madvise_free_page_range(&tlb, vma, range.start, range.end);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}
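
/*
 * Illustrative user-space sketch (an editor's assumption, not part of
 * this file): an allocator can mark cached anonymous pages lazy-free
 * instead of unmapping them. The kernel reclaims such pages only under
 * memory pressure; a later write cancels the mark, but the caller must
 * treat the contents as undefined after the call.
 *
 *	#include <sys/mman.h>
 *
 *	// Return a cached, page-aligned buffer to the kernel without
 *	// giving up the address range.
 *	static void buffer_release(void *buf, size_t len)
 *	{
 *		// Only anonymous VMAs are supported here (see
 *		// madvise_free_single_vma() above); others get -EINVAL.
 *		madvise(buf, len, MADV_FREE);
 *	}
 */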

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

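/*
 * Illustrative user-space sketch (an editor's assumption, not part of
 * this file): unlike MADV_FREE above, MADV_DONTNEED drops the pages
 * immediately via zap_page_range(); the next access to anonymous memory
 * faults in zero-filled pages rather than the old data.
 *
 *	#include <assert.h>
 *	#include <sys/mman.h>
 *
 *	// p must be page-aligned and len a multiple of the page size.
 *	static void scratch_reset(char *p, size_t len)
 *	{
 *		p[0] = 42;
 *		madvise(p, len, MADV_DONTNEED);	// discard dirty contents
 *		assert(p[0] == 0);	// anon pages read back as zeroes
 *	}
 */
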
static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation must not
			 * leave madvise() with an undefined result.
			 * There may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
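
/*
 * Illustrative user-space sketch (an editor's assumption, not part of
 * this file): MADV_REMOVE punches a hole through a writable shared
 * mapping, e.g. on tmpfs, releasing the pages and the backing store,
 * much like calling fallocate(FALLOC_FL_PUNCH_HOLE) on the file itself,
 * which is exactly what madvise_remove() does.
 *
 *	#include <sys/mman.h>
 *
 *	// fd refers to a hole-punch capable file of at least len bytes.
 *	static int drop_range(int fd, size_t len, size_t off, size_t n)
 *	{
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *		int ret;
 *
 *		if (p == MAP_FAILED)
 *			return -1;
 *		ret = madvise(p + off, n, MADV_REMOVE);
 *		munmap(p, len);
 *		return ret;
 *	}
 */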

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif
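
/*
 * Illustrative sketch (an editor's assumption, not part of this file):
 * with CONFIG_MEMORY_FAILURE and CAP_SYS_ADMIN, the error handling above
 * can be exercised from user space against one private page.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static int poison_one_page(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return -1;
 *		p[0] = 1;	// fault the page in first
 *		// A later access to p normally raises SIGBUS.
 *		return madvise(p, page, MADV_HWPOISON);
 *	}
 */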

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, the application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP ranges.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
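
/*
 * Illustrative user-space sketch (an editor's assumption, not part of
 * this file) tying the pieces together: callers must pass a valid
 * behavior and a page-aligned start, and should expect -ENOMEM when
 * part of the range was unmapped even though the mapped subranges were
 * still advised (see the loop above).
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	static int advise_checked(void *addr, size_t len, int advice)
 *	{
 *		if (madvise(addr, len, advice) == 0)
 *			return 0;
 *		if (errno == ENOMEM)
 *			// Part of [addr, addr + len) was unmapped; the
 *			// kernel still advised the mapped subranges.
 *			return 0;
 *		perror("madvise");
 *		return -1;
 *	}
 */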