/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->lock      (memory_failure, collect_procs_anon)
 *     pte map lock
 */
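
/*
 * Illustrative sketch (not part of this file): reclaim unmapping a file
 * page takes the locks above in nesting order, roughly:
 *
 *	lock_page(page);				// PG_locked
 *	spin_lock(&mapping->i_mmap_lock);
 *	pte = page_check_address(page, mm, address, &ptl, 0);	// pte lock
 *	...
 *	pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */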

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc();
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		spin_lock(&anon_vma->lock);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		spin_unlock(&anon_vma->lock);

		if (unlikely(allocated))
			anon_vma_free(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
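
/*
 * Illustrative sketch (not part of this file): a fault handler along the
 * lines of do_anonymous_page() is expected to call anon_vma_prepare()
 * with mmap_sem held for read, before installing the first anonymous pte:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */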

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	spin_lock(&anon_vma->lock);
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
	spin_unlock(&anon_vma->lock);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc();
	if (!avc)
		goto out_error_free_anon_vma;
	anon_vma_chain_link(vma, avc, anon_vma);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;

	return 0;

 out_error_free_anon_vma:
	anon_vma_free(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
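
/*
 * Illustrative sketch (not part of this file): fork's dup_mmap() is the
 * expected caller, attaching each copied vma both to the parent's
 * anon_vmas and to a fresh anon_vma of its own:
 *
 *	tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 *	*tmp = *mpnt;				// mpnt is the parent vma
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */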

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
	int empty;

	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	/* Unlink each anon_vma chained to the VMA. */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	ksm_refcount_init(anon_vma);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
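
/*
 * Illustrative sketch (not part of this file): the two functions above
 * are a strict pair; the rcu_read_lock() taken by page_lock_anon_vma()
 * is only dropped by page_unlock_anon_vma():
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ret;
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *		...
 *	}
 *	page_unlock_anon_vma(anon_vma);
 */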

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Checks that the page actually matches the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if (vma->anon_vma != page_anon_vma(page))
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
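
/*
 * Illustrative sketch (not part of this file): on success the caller
 * owns the pte lock and must drop it with pte_unmap_unlock():
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return 0;		// page is not mapped here
 *	... examine or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */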

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of vmas which actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of vmas which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}
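
/*
 * Illustrative sketch (not part of this file): vmscan consumes both the
 * returned count and the collected vm_flags, very roughly:
 *
 *	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
 *	if (referenced && !(vm_flags & VM_LOCKED))
 *		goto activate_locked;	// recently used: keep it active
 */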

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
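
/*
 * Illustrative sketch (not part of this file): clear_page_dirty_for_io()
 * style callers use page_mkclean() to write-protect and clean every pte
 * before clearing the page's own dirty bit:
 *
 *	lock_page(page);
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	// a pte was dirty: redirty the page
 *	unlock_page(page);
 */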

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 *
	 * So take the last AVC chain entry in the vma, which is
	 * the deepest ancestor, and use the anon_vma from that.
	 */
	if (!exclusive) {
		struct anon_vma_chain *avc;
		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
		anon_vma = avc->anon_vma;
	}

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__inc_zone_page_state(page, NR_ANON_PAGES);
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__inc_zone_page_state(page, NR_ANON_PAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, 1);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, -1);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
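
/*
 * Illustrative sketch (not part of this file): zap_pte_range() style
 * teardown pairs each cleared pte with page_remove_rmap() and a
 * reference drop, all under the pte lock:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	...
 *	page_remove_rmap(page);
 *	tlb_remove_page(tlb, page);	// drops the pte's page reference
 */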

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking: otherwise the VM_LOCKED check is racy
	 * and gives an unstable result.  Plus, we can't wait here because
	 * we now hold anon_vma->lock or mapping->i_mmap_lock.
	 * If the trylock fails, the page remains on the evictable lru and
	 * vmscan can later retry moving it to the unevictable lru if the
	 * page is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly. Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
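
/*
 * Illustrative sketch (not part of this file): shrink_page_list() in
 * vmscan is the main consumer, dispatching on the return value:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;		// fall through and try to free the page
 *	}
 */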

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem, which also gave the necessary guarantee
	 * (that this anon_vma's slab has not already been destroyed).
	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
	 * is risky, and currently limits the usefulness of rmap_walk().
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	spin_lock(&anon_vma->lock);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
#endif /* CONFIG_MIGRATION */