/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
 * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
 * taken together; in truncation, i_mutex is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
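
/*
 * Illustrative sketch (not part of this file): the usual nesting when
 * examining an anonymous page's ptes, following the ordering above -
 * mmap_sem outside the page lock, anon_vma->lock outside the pte lock:
 *
 *	down_read(&mm->mmap_sem);
 *	lock_page(page);
 *	spin_lock(&anon_vma->lock);
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte)
 *		pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&anon_vma->lock);
 *	unlock_page(page);
 *	up_read(&mm->mmap_sem);
 */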

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
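
/*
 * Example (sketch, not part of this file): fault handlers call
 * anon_vma_prepare() under down_read(&mm->mmap_sem) before installing
 * a new anonymous pte, roughly as do_anonymous_page() does:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */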

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
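
/*
 * Worked example (illustrative, not from the source): with 4K pages,
 * a vma with vm_start == 0x40000000 and vm_pgoff == 0x10 maps a page
 * with index == 0x12 at
 *
 *	0x40000000 + ((0x12 - 0x10) << 12) == 0x40002000
 *
 * An index falling outside the vma's range yields -EFAULT.
 */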

/*
 * At what user virtual address is page expected in vma? Checking that
 * the page matches the vma: currently only used on anon pages, by
 * unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
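
/*
 * Caller-side sketch (illustrative): on success the pte comes back
 * mapped and locked, so every caller must pair the call with
 * pte_unmap_unlock(), as page_referenced_one() below does:
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		... inspect or modify *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */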

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
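
/*
 * Example (sketch; the real caller is the reclaim scanner in
 * mm/vmscan.c): the returned count tells reclaim whether a mapped
 * page was recently used and should stay on the active list:
 *
 *	if (page_referenced(page, 1))
 *		goto activate_locked;
 */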

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte, entry;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (!pte_dirty(*pte) && !pte_write(*pte))
		goto unlock;

	entry = ptep_get_and_clear(mm, address, pte);
	entry = pte_mkclean(entry);
	entry = pte_wrprotect(entry);
	ptep_establish(vma, address, pte, entry);
	lazy_mmu_prot_update(entry);
	ret = 1;

unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}

	return ret;
}
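
/*
 * Usage note (sketch): writeback is expected to call page_mkclean()
 * before starting I/O on a shared-writable page, so that the next
 * store through any mapping faults and re-dirties the page - roughly
 * what mm/page-writeback.c does when dirty accounting is enabled:
 *
 *	if (mapping_cap_account_dirty(mapping))
 *		page_mkclean(page);
 */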

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
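
/*
 * Example (sketch of the anonymous fault path; compare
 * do_anonymous_page() in mm/memory.c): a newly allocated page is
 * counted, put on the LRU, hooked into the rmap and only then mapped:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	lru_cache_add_active(page);
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 */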

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
#ifdef CONFIG_DEBUG_VM
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk(KERN_EMERG "  page->count = %x\n", page_count(page));
			printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
		}
#endif
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
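
/*
 * Worked example (illustrative, assuming 4K pages and a PMD_SIZE of at
 * least 128K): CLUSTER_SIZE is then 128K, so for a vma starting at
 * 0x40000000 and a cursor of 0x21000, try_to_unmap_cluster() scans from
 *
 *	(0x40000000 + 0x21000) & CLUSTER_MASK == 0x40020000
 *
 * covering the 32 ptes up to 0x40040000 (clamped to the vma bounds).
 */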

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
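
/*
 * Example (sketch; compare shrink_page_list() in mm/vmscan.c): the
 * pageout path branches on the return value before trying to free
 * the page:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 */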