/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}
		spin_lock(&anon_vma->lock);

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		spin_unlock(&anon_vma->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

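/*
 * __anon_vma_merge - unlink @next from its anon_vma list when it is being
 * merged into @vma.  Both vmas must already share the same anon_vma; as
 * with the other double-underscore helpers here, any locking is assumed
 * to be handled by the caller.
 */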
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

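/*
 * __anon_vma_link - add @vma to its anon_vma's list without taking the
 * anon_vma lock; anon_vma_link() below is the locked variant.
 */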
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

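/*
 * anon_vma_unlink - remove @vma from its anon_vma's list, and free the
 * anon_vma once the last vma referencing it has gone.
 */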
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

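/*
 * Create the anon_vma slab cache at boot.  SLAB_DESTROY_BY_RCU keeps the
 * underlying slab memory valid across an RCU grace period, which is what
 * lets page_lock_anon_vma() below probe an anon_vma under rcu_read_lock().
 */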
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma, checking that
 * the page matches the vma?  Currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	if (vma->vm_flags & VM_LOCKED) {
		referenced++;
		*mapcount = 1;	/* break early from loop */
	} else if (ptep_clear_flush_young_notify(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

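/*
 * page_mkclean_one - write-protect and clean one pte mapping @page in @vma.
 * Returns 1 if the pte was dirty or writable and had to be rewritten.
 */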
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

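/*
 * page_mkclean_file - run page_mkclean_one() over every shared vma that maps
 * @page, under mapping->i_mmap_lock; returns how many ptes were cleaned.
 */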
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

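/*
 * page_mkclean - clean and write-protect all ptes mapping a locked file page.
 * Returns non-zero if any pte (or the s390 storage key) was found dirty.
 */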
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 * @vma: the vm area being duplicated
 * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * Now that the last pte has gone, s390 must transfer dirty
		 * flag from storage key to struct page. We can usually skip
		 * this if the page is anon, so about to be freed; but perhaps
		 * not if it's in swapcache - there might be another pte slot
		 * containing the swap entry, but page not yet written to swap.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}

		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young_notify(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

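/*
 * try_to_unmap_anon - walk the anon_vma list of @page and try to unmap it
 * from each vma, stopping early on SWAP_FAIL or once the page is unmapped.
 */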
static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 * @migration: migration flag
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}