1 /*
2 * mm/rmap.c - physical to virtual reverse mappings
3 *
4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
5 * Released under the General Public License (GPL).
6 *
7 * Simple, low overhead reverse mapping scheme.
8 * Please try to keep this thing as modular as possible.
9 *
10 * Provides methods for unmapping each kind of mapped page:
11 * the anon methods track anonymous pages, and
12 * the file methods track pages belonging to an inode.
13 *
14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17 * Contributions by Hugh Dickins 2003, 2004
18 */
19
20 /*
21 * Lock ordering in mm:
22 *
23 * inode->i_mutex (while writing or truncating, not reading or faulting)
24 * mm->mmap_sem
25 * page->flags PG_locked (lock_page)
26 * mapping->i_mmap_mutex
27 * anon_vma->rwsem
28 * mm->page_table_lock or pte_lock
29 * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
30 * swap_lock (in swap_duplicate, swap_info_get)
31 * mmlist_lock (in mmput, drain_mmlist and others)
32 * mapping->private_lock (in __set_page_dirty_buffers)
33 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
34 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
35 * sb_lock (within inode_lock in fs/fs-writeback.c)
36 * mapping->tree_lock (widely used, in set_page_dirty,
37 * in arch-dependent flush_dcache_mmap_lock,
38 * within bdi.wb->list_lock in __sync_single_inode)
39 *
40 * anon_vma->rwsem, mapping->i_mutex (memory_failure, collect_procs_anon)
41 * ->tasklist_lock
42 * pte map lock
43 */
44
45 #include <linux/mm.h>
46 #include <linux/pagemap.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/slab.h>
50 #include <linux/init.h>
51 #include <linux/ksm.h>
52 #include <linux/rmap.h>
53 #include <linux/rcupdate.h>
54 #include <linux/export.h>
55 #include <linux/memcontrol.h>
56 #include <linux/mmu_notifier.h>
57 #include <linux/migrate.h>
58 #include <linux/hugetlb.h>
59 #include <linux/backing-dev.h>
60
61 #include <asm/tlbflush.h>
62
63 #include "internal.h"
64
65 static struct kmem_cache *anon_vma_cachep;
66 static struct kmem_cache *anon_vma_chain_cachep;
67
68 static inline struct anon_vma *anon_vma_alloc(void)
69 {
70 struct anon_vma *anon_vma;
71
72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
73 if (anon_vma) {
74 atomic_set(&anon_vma->refcount, 1);
75 /*
76 * Initialise the anon_vma root to point to itself. If called
77 * from fork, the root will be reset to the parent's anon_vma.
78 */
79 anon_vma->root = anon_vma;
80 }
81
82 return anon_vma;
83 }
84
85 static inline void anon_vma_free(struct anon_vma *anon_vma)
86 {
87 VM_BUG_ON(atomic_read(&anon_vma->refcount));
88
89 /*
90 * Synchronize against page_lock_anon_vma_read() such that
91 * we can safely hold the lock without the anon_vma getting
92 * freed.
93 *
94 * Relies on the full mb implied by the atomic_dec_and_test() from
95 * put_anon_vma() against the acquire barrier implied by
96 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
97 *
98 * page_lock_anon_vma_read() VS put_anon_vma()
99 * down_read_trylock() atomic_dec_and_test()
100 * LOCK MB
101 * atomic_read() rwsem_is_locked()
102 *
103 * LOCK should suffice since the actual taking of the lock must
104 * happen _before_ what follows.
105 */
106 might_sleep();
107 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
108 anon_vma_lock_write(anon_vma);
109 anon_vma_unlock_write(anon_vma);
110 }
111
112 kmem_cache_free(anon_vma_cachep, anon_vma);
113 }
114
115 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
116 {
117 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
118 }
119
120 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
121 {
122 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
123 }
124
125 static void anon_vma_chain_link(struct vm_area_struct *vma,
126 struct anon_vma_chain *avc,
127 struct anon_vma *anon_vma)
128 {
129 avc->vma = vma;
130 avc->anon_vma = anon_vma;
131 list_add(&avc->same_vma, &vma->anon_vma_chain);
132 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
133 }
134
135 /**
136 * anon_vma_prepare - attach an anon_vma to a memory region
137 * @vma: the memory region in question
138 *
139 * This makes sure the memory mapping described by 'vma' has
140 * an 'anon_vma' attached to it, so that we can associate the
141 * anonymous pages mapped into it with that anon_vma.
142 *
143 * The common case will be that we already have one, but if
144 * not we either need to find an adjacent mapping that we
145 * can re-use the anon_vma from (very common when the only
146 * reason for splitting a vma has been mprotect()), or we
147 * allocate a new one.
148 *
149 * Anon-vma allocations are very subtle, because we may have
150 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
151 * and that may actually touch the spinlock even in the newly
152 * allocated vma (it depends on RCU to make sure that the
153 * anon_vma isn't actually destroyed).
154 *
155 * As a result, we need to do proper anon_vma locking even
156 * for the new allocation. At the same time, we do not want
157 * to do any locking for the common case of already having
158 * an anon_vma.
159 *
160 * This must be called with the mmap_sem held for reading.
161 */
162 int anon_vma_prepare(struct vm_area_struct *vma)
163 {
164 struct anon_vma *anon_vma = vma->anon_vma;
165 struct anon_vma_chain *avc;
166
167 might_sleep();
168 if (unlikely(!anon_vma)) {
169 struct mm_struct *mm = vma->vm_mm;
170 struct anon_vma *allocated;
171
172 avc = anon_vma_chain_alloc(GFP_KERNEL);
173 if (!avc)
174 goto out_enomem;
175
176 anon_vma = find_mergeable_anon_vma(vma);
177 allocated = NULL;
178 if (!anon_vma) {
179 anon_vma = anon_vma_alloc();
180 if (unlikely(!anon_vma))
181 goto out_enomem_free_avc;
182 allocated = anon_vma;
183 }
184
185 anon_vma_lock_write(anon_vma);
186 /* page_table_lock to protect against threads */
187 spin_lock(&mm->page_table_lock);
188 if (likely(!vma->anon_vma)) {
189 vma->anon_vma = anon_vma;
190 anon_vma_chain_link(vma, avc, anon_vma);
191 allocated = NULL;
192 avc = NULL;
193 }
194 spin_unlock(&mm->page_table_lock);
195 anon_vma_unlock_write(anon_vma);
196
197 if (unlikely(allocated))
198 put_anon_vma(allocated);
199 if (unlikely(avc))
200 anon_vma_chain_free(avc);
201 }
202 return 0;
203
204 out_enomem_free_avc:
205 anon_vma_chain_free(avc);
206 out_enomem:
207 return -ENOMEM;
208 }
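/*
 * A minimal usage sketch, not taken from this file: the usual caller is an
 * anonymous fault path that already holds mmap_sem for reading, as required
 * above (cf. do_anonymous_page() in mm/memory.c). Error labels are
 * illustrative only.
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		goto oom;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */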
209
210 /*
211 * This is a useful helper function for locking the anon_vma root as
212 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
213 * have the same vma.
214 *
215 * Such anon_vma's should have the same root, so you'd expect to see
216 * just a single down_write on the root rwsem for the whole traversal.
217 */
218 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
219 {
220 struct anon_vma *new_root = anon_vma->root;
221 if (new_root != root) {
222 if (WARN_ON_ONCE(root))
223 up_write(&root->rwsem);
224 root = new_root;
225 down_write(&root->rwsem);
226 }
227 return root;
228 }
229
230 static inline void unlock_anon_vma_root(struct anon_vma *root)
231 {
232 if (root)
233 up_write(&root->rwsem);
234 }
235
236 /*
237 * Attach the anon_vmas from src to dst.
238 * Returns 0 on success, -ENOMEM on failure.
239 */
240 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
241 {
242 struct anon_vma_chain *avc, *pavc;
243 struct anon_vma *root = NULL;
244
245 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
246 struct anon_vma *anon_vma;
247
248 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
249 if (unlikely(!avc)) {
250 unlock_anon_vma_root(root);
251 root = NULL;
252 avc = anon_vma_chain_alloc(GFP_KERNEL);
253 if (!avc)
254 goto enomem_failure;
255 }
256 anon_vma = pavc->anon_vma;
257 root = lock_anon_vma_root(root, anon_vma);
258 anon_vma_chain_link(dst, avc, anon_vma);
259 }
260 unlock_anon_vma_root(root);
261 return 0;
262
263 enomem_failure:
264 unlink_anon_vmas(dst);
265 return -ENOMEM;
266 }
267
268 /*
269 * Attach vma to its own anon_vma, as well as to the anon_vmas that
270 * the corresponding VMA in the parent process is attached to.
271 * Returns 0 on success, non-zero on failure.
272 */
273 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
274 {
275 struct anon_vma_chain *avc;
276 struct anon_vma *anon_vma;
277
278 /* Don't bother if the parent process has no anon_vma here. */
279 if (!pvma->anon_vma)
280 return 0;
281
282 /*
283 * First, attach the new VMA to the parent VMA's anon_vmas,
284 * so rmap can find non-COWed pages in child processes.
285 */
286 if (anon_vma_clone(vma, pvma))
287 return -ENOMEM;
288
289 /* Then add our own anon_vma. */
290 anon_vma = anon_vma_alloc();
291 if (!anon_vma)
292 goto out_error;
293 avc = anon_vma_chain_alloc(GFP_KERNEL);
294 if (!avc)
295 goto out_error_free_anon_vma;
296
297 /*
298 * The root anon_vma's rwsem is the lock actually used when we
299 * lock any of the anon_vmas in this anon_vma tree.
300 */
301 anon_vma->root = pvma->anon_vma->root;
302 /*
303 * With refcounts, an anon_vma can stay around longer than the
304 * process it belongs to. The root anon_vma needs to be pinned until
305 * this anon_vma is freed, because the lock lives in the root.
306 */
307 get_anon_vma(anon_vma->root);
308 /* Mark this anon_vma as the one where our new (COWed) pages go. */
309 vma->anon_vma = anon_vma;
310 anon_vma_lock_write(anon_vma);
311 anon_vma_chain_link(vma, avc, anon_vma);
312 anon_vma_unlock_write(anon_vma);
313
314 return 0;
315
316 out_error_free_anon_vma:
317 put_anon_vma(anon_vma);
318 out_error:
319 unlink_anon_vmas(vma);
320 return -ENOMEM;
321 }
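/*
 * Rough sketch of how the fork path wires this up (cf. dup_mmap() in
 * kernel/fork.c), where 'tmp' is the child's copy of the parent vma 'mpnt';
 * the error label is illustrative:
 *
 *	INIT_LIST_HEAD(&tmp->anon_vma_chain);
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */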
322
323 void unlink_anon_vmas(struct vm_area_struct *vma)
324 {
325 struct anon_vma_chain *avc, *next;
326 struct anon_vma *root = NULL;
327
328 /*
329 * Unlink each anon_vma chained to the VMA. This list is ordered
330 * from newest to oldest, ensuring the root anon_vma gets freed last.
331 */
332 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
333 struct anon_vma *anon_vma = avc->anon_vma;
334
335 root = lock_anon_vma_root(root, anon_vma);
336 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
337
338 /*
339 * Leave empty anon_vmas on the list - we'll need
340 * to free them outside the lock.
341 */
342 if (RB_EMPTY_ROOT(&anon_vma->rb_root))
343 continue;
344
345 list_del(&avc->same_vma);
346 anon_vma_chain_free(avc);
347 }
348 unlock_anon_vma_root(root);
349
350 /*
351 * Iterate the list once more; it now contains only empty and unlinked
352 * anon_vmas, so destroy them. We could not do this earlier because
353 * __put_anon_vma() needs to write-acquire the anon_vma->root->rwsem.
354 */
355 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
356 struct anon_vma *anon_vma = avc->anon_vma;
357
358 put_anon_vma(anon_vma);
359
360 list_del(&avc->same_vma);
361 anon_vma_chain_free(avc);
362 }
363 }
364
365 static void anon_vma_ctor(void *data)
366 {
367 struct anon_vma *anon_vma = data;
368
369 init_rwsem(&anon_vma->rwsem);
370 atomic_set(&anon_vma->refcount, 0);
371 anon_vma->rb_root = RB_ROOT;
372 }
373
374 void __init anon_vma_init(void)
375 {
376 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
377 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
378 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
379 }
380
381 /*
382 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
383 *
384 * Since there is no serialization whatsoever against page_remove_rmap(),
385 * the best this function can do is return a locked anon_vma that might
386 * have been relevant to this page.
387 *
388 * The page might have been remapped to a different anon_vma or the anon_vma
389 * returned may already be freed (and even reused).
390 *
391 * In case it was remapped to a different anon_vma, the new anon_vma will be a
392 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
393 * ensure that any anon_vma obtained from the page will still be valid for as
394 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
395 *
396 * All users of this function must be very careful when walking the anon_vma
397 * chain and verify that the page in question is indeed mapped in it
398 * [ something equivalent to page_mapped_in_vma() ].
399 *
400 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
401 * that the anon_vma pointer from page->mapping is valid if there is a
402 * mapcount, we can dereference the anon_vma after observing those.
403 */
404 struct anon_vma *page_get_anon_vma(struct page *page)
405 {
406 struct anon_vma *anon_vma = NULL;
407 unsigned long anon_mapping;
408
409 rcu_read_lock();
410 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
411 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
412 goto out;
413 if (!page_mapped(page))
414 goto out;
415
416 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
417 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
418 anon_vma = NULL;
419 goto out;
420 }
421
422 /*
423 * If this page is still mapped, then its anon_vma cannot have been
424 * freed. But if it has been unmapped, we have no security against the
425 * anon_vma structure being freed and reused (for another anon_vma:
426 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
427 * above cannot corrupt).
428 */
429 if (!page_mapped(page)) {
430 rcu_read_unlock();
431 put_anon_vma(anon_vma);
432 return NULL;
433 }
434 out:
435 rcu_read_unlock();
436
437 return anon_vma;
438 }
439
440 /*
441 * Similar to page_get_anon_vma() except it locks the anon_vma.
442 *
443 * It's a little more complex as it tries to keep the fast path to a single
444 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
445 * reference like with page_get_anon_vma() and then block on the mutex.
446 */
447 struct anon_vma *page_lock_anon_vma_read(struct page *page)
448 {
449 struct anon_vma *anon_vma = NULL;
450 struct anon_vma *root_anon_vma;
451 unsigned long anon_mapping;
452
453 rcu_read_lock();
454 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
455 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
456 goto out;
457 if (!page_mapped(page))
458 goto out;
459
460 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
461 root_anon_vma = ACCESS_ONCE(anon_vma->root);
462 if (down_read_trylock(&root_anon_vma->rwsem)) {
463 /*
464 * If the page is still mapped, then this anon_vma is still
465 * its anon_vma, and holding the mutex ensures that it will
466 * not go away, see anon_vma_free().
467 */
468 if (!page_mapped(page)) {
469 up_read(&root_anon_vma->rwsem);
470 anon_vma = NULL;
471 }
472 goto out;
473 }
474
475 /* trylock failed, we have to sleep */
476 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
477 anon_vma = NULL;
478 goto out;
479 }
480
481 if (!page_mapped(page)) {
482 rcu_read_unlock();
483 put_anon_vma(anon_vma);
484 return NULL;
485 }
486
487 /* we pinned the anon_vma, it's safe to sleep */
488 rcu_read_unlock();
489 anon_vma_lock_read(anon_vma);
490
491 if (atomic_dec_and_test(&anon_vma->refcount)) {
492 /*
493 * Oops, we held the last refcount, release the lock
494 * and bail -- can't simply use put_anon_vma() because
495 * we'll deadlock on the anon_vma_lock_write() recursion.
496 */
497 anon_vma_unlock_read(anon_vma);
498 __put_anon_vma(anon_vma);
499 anon_vma = NULL;
500 }
501
502 return anon_vma;
503
504 out:
505 rcu_read_unlock();
506 return anon_vma;
507 }
508
509 void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
510 {
511 anon_vma_unlock_read(anon_vma);
512 }
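/*
 * Hedged sketch of the expected lock/unlock pairing for a caller that wants
 * to walk the interval tree itself (rmap_walk_anon() below is the canonical
 * user); error handling elided:
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (!anon_vma)
 *		return SWAP_AGAIN;
 *	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff)
 *		... examine avc->vma ...
 *	page_unlock_anon_vma_read(anon_vma);
 */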
513
514 /*
515 * At what user virtual address is page expected in @vma?
516 */
517 static inline unsigned long
518 __vma_address(struct page *page, struct vm_area_struct *vma)
519 {
520 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
521
522 if (unlikely(is_vm_hugetlb_page(vma)))
523 pgoff = page->index << huge_page_order(page_hstate(page));
524
525 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526 }
527
528 inline unsigned long
529 vma_address(struct page *page, struct vm_area_struct *vma)
530 {
531 unsigned long address = __vma_address(page, vma);
532
533 /* page should be within @vma mapping range */
534 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
535
536 return address;
537 }
538
539 /*
540 * At what user virtual address is page expected in vma?
541 * Caller should check the page is actually part of the vma.
542 */
543 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
544 {
545 unsigned long address;
546 if (PageAnon(page)) {
547 struct anon_vma *page__anon_vma = page_anon_vma(page);
548 /*
549 * Note: swapoff's unuse_vma() is more efficient with this
550 * check, and needs it to match anon_vma when KSM is active.
551 */
552 if (!vma->anon_vma || !page__anon_vma ||
553 vma->anon_vma->root != page__anon_vma->root)
554 return -EFAULT;
555 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
556 if (!vma->vm_file ||
557 vma->vm_file->f_mapping != page->mapping)
558 return -EFAULT;
559 } else
560 return -EFAULT;
561 address = __vma_address(page, vma);
562 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
563 return -EFAULT;
564 return address;
565 }
566
567 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
568 {
569 pgd_t *pgd;
570 pud_t *pud;
571 pmd_t *pmd = NULL;
572
573 pgd = pgd_offset(mm, address);
574 if (!pgd_present(*pgd))
575 goto out;
576
577 pud = pud_offset(pgd, address);
578 if (!pud_present(*pud))
579 goto out;
580
581 pmd = pmd_offset(pud, address);
582 if (!pmd_present(*pmd))
583 pmd = NULL;
584 out:
585 return pmd;
586 }
587
588 /*
589 * Check that @page is mapped at @address into @mm.
590 *
591 * If @sync is false, page_check_address may perform a racy check to avoid
592 * the page table lock when the pte is not present (helpful when reclaiming
593 * highly shared pages).
594 *
595 * On success returns with pte mapped and locked.
596 */
597 pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
598 unsigned long address, spinlock_t **ptlp, int sync)
599 {
600 pmd_t *pmd;
601 pte_t *pte;
602 spinlock_t *ptl;
603
604 if (unlikely(PageHuge(page))) {
605 /* when pud is not present, pte will be NULL */
606 pte = huge_pte_offset(mm, address);
607 if (!pte)
608 return NULL;
609
610 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
611 goto check;
612 }
613
614 pmd = mm_find_pmd(mm, address);
615 if (!pmd)
616 return NULL;
617
618 if (pmd_trans_huge(*pmd))
619 return NULL;
620
621 pte = pte_offset_map(pmd, address);
622 /* Make a quick check before getting the lock */
623 if (!sync && !pte_present(*pte)) {
624 pte_unmap(pte);
625 return NULL;
626 }
627
628 ptl = pte_lockptr(mm, pmd);
629 check:
630 spin_lock(ptl);
631 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
632 *ptlp = ptl;
633 return pte;
634 }
635 pte_unmap_unlock(pte, ptl);
636 return NULL;
637 }
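/*
 * Callers normally go through the page_check_address() wrapper in
 * include/linux/rmap.h; a minimal sketch of the expected pattern, with
 * error handling elided:
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return SWAP_AGAIN;
 *	... inspect or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */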
638
639 /**
640 * page_mapped_in_vma - check whether a page is really mapped in a VMA
641 * @page: the page to test
642 * @vma: the VMA to test
643 *
644 * Returns 1 if the page is mapped into the page tables of the VMA, 0
645 * if the page is not mapped into the page tables of this VMA. Only
646 * valid for normal file or anonymous VMAs.
647 */
648 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
649 {
650 unsigned long address;
651 pte_t *pte;
652 spinlock_t *ptl;
653
654 address = __vma_address(page, vma);
655 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
656 return 0;
657 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
658 if (!pte) /* the page is not in this mm */
659 return 0;
660 pte_unmap_unlock(pte, ptl);
661
662 return 1;
663 }
664
665 struct page_referenced_arg {
666 int mapcount;
667 int referenced;
668 unsigned long vm_flags;
669 struct mem_cgroup *memcg;
670 };
671 /*
672 * arg: page_referenced_arg will be passed
673 */
674 static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
675 unsigned long address, void *arg)
676 {
677 struct mm_struct *mm = vma->vm_mm;
678 spinlock_t *ptl;
679 int referenced = 0;
680 struct page_referenced_arg *pra = arg;
681
682 if (unlikely(PageTransHuge(page))) {
683 pmd_t *pmd;
684
685 /*
686 * rmap might return false positives; we must filter
687 * these out using page_check_address_pmd().
688 */
689 pmd = page_check_address_pmd(page, mm, address,
690 PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
691 if (!pmd)
692 return SWAP_AGAIN;
693
694 if (vma->vm_flags & VM_LOCKED) {
695 spin_unlock(ptl);
696 pra->vm_flags |= VM_LOCKED;
697 return SWAP_FAIL; /* To break the loop */
698 }
699
700 /* go ahead even if the pmd is pmd_trans_splitting() */
701 if (pmdp_clear_flush_young_notify(vma, address, pmd))
702 referenced++;
703 spin_unlock(ptl);
704 } else {
705 pte_t *pte;
706
707 /*
708 * rmap might return false positives; we must filter
709 * these out using page_check_address().
710 */
711 pte = page_check_address(page, mm, address, &ptl, 0);
712 if (!pte)
713 return SWAP_AGAIN;
714
715 if (vma->vm_flags & VM_LOCKED) {
716 pte_unmap_unlock(pte, ptl);
717 pra->vm_flags |= VM_LOCKED;
718 return SWAP_FAIL; /* To break the loop */
719 }
720
721 if (ptep_clear_flush_young_notify(vma, address, pte)) {
722 /*
723 * Don't treat a reference through a sequentially read
724 * mapping as such. If the page has been used in
725 * another mapping, we will catch it; if this other
726 * mapping is already gone, the unmap path will have
727 * set PG_referenced or activated the page.
728 */
729 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
730 referenced++;
731 }
732 pte_unmap_unlock(pte, ptl);
733 }
734
735 if (referenced) {
736 pra->referenced++;
737 pra->vm_flags |= vma->vm_flags;
738 }
739
740 pra->mapcount--;
741 if (!pra->mapcount)
742 return SWAP_SUCCESS; /* To break the loop */
743
744 return SWAP_AGAIN;
745 }
746
747 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
748 {
749 struct page_referenced_arg *pra = arg;
750 struct mem_cgroup *memcg = pra->memcg;
751
752 if (!mm_match_cgroup(vma->vm_mm, memcg))
753 return true;
754
755 return false;
756 }
757
758 /**
759 * page_referenced - test if the page was referenced
760 * @page: the page to test
761 * @is_locked: caller holds lock on the page
762 * @memcg: target memory cgroup
763 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
764 *
765 * Quick test_and_clear_referenced for all mappings to a page,
766 * returns the number of ptes which referenced the page.
767 */
768 int page_referenced(struct page *page,
769 int is_locked,
770 struct mem_cgroup *memcg,
771 unsigned long *vm_flags)
772 {
773 int ret;
774 int we_locked = 0;
775 struct page_referenced_arg pra = {
776 .mapcount = page_mapcount(page),
777 .memcg = memcg,
778 };
779 struct rmap_walk_control rwc = {
780 .rmap_one = page_referenced_one,
781 .arg = (void *)&pra,
782 .anon_lock = page_lock_anon_vma_read,
783 };
784
785 *vm_flags = 0;
786 if (!page_mapped(page))
787 return 0;
788
789 if (!page_rmapping(page))
790 return 0;
791
792 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
793 we_locked = trylock_page(page);
794 if (!we_locked)
795 return 1;
796 }
797
798 /*
799 * If we are reclaiming on behalf of a cgroup, skip
800 * counting on behalf of references from different
801 * cgroups
802 */
803 if (memcg) {
804 rwc.invalid_vma = invalid_page_referenced_vma;
805 }
806
807 ret = rmap_walk(page, &rwc);
808 *vm_flags = pra.vm_flags;
809
810 if (we_locked)
811 unlock_page(page);
812
813 return pra.referenced;
814 }
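/*
 * A rough sketch of how reclaim consumes the result (cf.
 * page_check_references() in mm/vmscan.c); the decisions shown are abridged
 * and illustrative:
 *
 *	unsigned long vm_flags;
 *	int referenced = page_referenced(page, 1, memcg, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;
 *	if (referenced)
 *		... keep or activate the page ...
 *
 * Returning PAGEREF_RECLAIM for VM_LOCKED lets the unmap path mlock the
 * page lazily rather than aging it here.
 */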
815
816 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
817 unsigned long address, void *arg)
818 {
819 struct mm_struct *mm = vma->vm_mm;
820 pte_t *pte;
821 spinlock_t *ptl;
822 int ret = 0;
823 int *cleaned = arg;
824
825 pte = page_check_address(page, mm, address, &ptl, 1);
826 if (!pte)
827 goto out;
828
829 if (pte_dirty(*pte) || pte_write(*pte)) {
830 pte_t entry;
831
832 flush_cache_page(vma, address, pte_pfn(*pte));
833 entry = ptep_clear_flush(vma, address, pte);
834 entry = pte_wrprotect(entry);
835 entry = pte_mkclean(entry);
836 set_pte_at(mm, address, pte, entry);
837 ret = 1;
838 }
839
840 pte_unmap_unlock(pte, ptl);
841
842 if (ret) {
843 mmu_notifier_invalidate_page(mm, address);
844 (*cleaned)++;
845 }
846 out:
847 return SWAP_AGAIN;
848 }
849
850 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
851 {
852 if (vma->vm_flags & VM_SHARED)
853 return false;
854
855 return true;
856 }
857
858 int page_mkclean(struct page *page)
859 {
860 int cleaned = 0;
861 struct address_space *mapping;
862 struct rmap_walk_control rwc = {
863 .arg = (void *)&cleaned,
864 .rmap_one = page_mkclean_one,
865 .invalid_vma = invalid_mkclean_vma,
866 };
867
868 BUG_ON(!PageLocked(page));
869
870 if (!page_mapped(page))
871 return 0;
872
873 mapping = page_mapping(page);
874 if (!mapping)
875 return 0;
876
877 rmap_walk(page, &rwc);
878
879 return cleaned;
880 }
881 EXPORT_SYMBOL_GPL(page_mkclean);
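/*
 * Hedged usage sketch (cf. clear_page_dirty_for_io() in mm/page-writeback.c):
 * the caller holds the page lock, write-protects every pte via
 * page_mkclean(), and folds any dirtiness found in the ptes back into the
 * page before starting writeback:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */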
882
883 /**
884 * page_move_anon_rmap - move a page to our anon_vma
885 * @page: the page to move to our anon_vma
886 * @vma: the vma the page belongs to
887 * @address: the user virtual address mapped
888 *
889 * When a page belongs exclusively to one process after a COW event,
890 * that page can be moved into the anon_vma that belongs to just that
891 * process, so the rmap code will not search the parent or sibling
892 * processes.
893 */
894 void page_move_anon_rmap(struct page *page,
895 struct vm_area_struct *vma, unsigned long address)
896 {
897 struct anon_vma *anon_vma = vma->anon_vma;
898
899 VM_BUG_ON_PAGE(!PageLocked(page), page);
900 VM_BUG_ON(!anon_vma);
901 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
902
903 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
904 page->mapping = (struct address_space *) anon_vma;
905 }
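/*
 * Sketch of the COW-reuse caller (cf. do_wp_page() in mm/memory.c), taken
 * once reuse_swap_page() has shown that we are the only mapper and the page
 * lock is held:
 *
 *	if (reuse_swap_page(old_page))
 *		page_move_anon_rmap(old_page, vma, address);
 */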
906
907 /**
908 * __page_set_anon_rmap - set up new anonymous rmap
909 * @page: Page to add to rmap
910 * @vma: VM area to add page to.
911 * @address: User virtual address of the mapping
912 * @exclusive: the page is exclusively owned by the current process
913 */
914 static void __page_set_anon_rmap(struct page *page,
915 struct vm_area_struct *vma, unsigned long address, int exclusive)
916 {
917 struct anon_vma *anon_vma = vma->anon_vma;
918
919 BUG_ON(!anon_vma);
920
921 if (PageAnon(page))
922 return;
923
924 /*
925 * If the page isn't exclusively mapped into this vma,
926 * we must use the _oldest_ possible anon_vma for the
927 * page mapping!
928 */
929 if (!exclusive)
930 anon_vma = anon_vma->root;
931
932 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
933 page->mapping = (struct address_space *) anon_vma;
934 page->index = linear_page_index(vma, address);
935 }
936
937 /**
938 * __page_check_anon_rmap - sanity check anonymous rmap addition
939 * @page: the page to add the mapping to
940 * @vma: the vm area in which the mapping is added
941 * @address: the user virtual address mapped
942 */
943 static void __page_check_anon_rmap(struct page *page,
944 struct vm_area_struct *vma, unsigned long address)
945 {
946 #ifdef CONFIG_DEBUG_VM
947 /*
948 * The page's anon-rmap details (mapping and index) are guaranteed to
949 * be set up correctly at this point.
950 *
951 * We have exclusion against page_add_anon_rmap because the caller
952 * always holds the page locked, except if called from page_dup_rmap,
953 * in which case the page is already known to be setup.
954 *
955 * We have exclusion against page_add_new_anon_rmap because those pages
956 * are initially only visible via the pagetables, and the pte is locked
957 * over the call to page_add_new_anon_rmap.
958 */
959 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
960 BUG_ON(page->index != linear_page_index(vma, address));
961 #endif
962 }
963
964 /**
965 * page_add_anon_rmap - add pte mapping to an anonymous page
966 * @page: the page to add the mapping to
967 * @vma: the vm area in which the mapping is added
968 * @address: the user virtual address mapped
969 *
970 * The caller needs to hold the pte lock, and the page must be locked in
971 * the anon_vma case: to serialize mapping,index checking after setting,
972 * and to ensure that PageAnon is not being upgraded racily to PageKsm
973 * (but PageKsm is never downgraded to PageAnon).
974 */
975 void page_add_anon_rmap(struct page *page,
976 struct vm_area_struct *vma, unsigned long address)
977 {
978 do_page_add_anon_rmap(page, vma, address, 0);
979 }
980
981 /*
982 * Special version of the above for do_swap_page, which often runs
983 * into pages that are exclusively owned by the current process.
984 * Everybody else should continue to use page_add_anon_rmap above.
985 */
986 void do_page_add_anon_rmap(struct page *page,
987 struct vm_area_struct *vma, unsigned long address, int exclusive)
988 {
989 int first = atomic_inc_and_test(&page->_mapcount);
990 if (first) {
991 /*
992 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
993 * these counters are not modified in interrupt context, and
994 * pte lock(a spinlock) is held, which implies preemption
995 * disabled.
996 */
997 if (PageTransHuge(page))
998 __inc_zone_page_state(page,
999 NR_ANON_TRANSPARENT_HUGEPAGES);
1000 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1001 hpage_nr_pages(page));
1002 }
1003 if (unlikely(PageKsm(page)))
1004 return;
1005
1006 VM_BUG_ON_PAGE(!PageLocked(page), page);
1007 /* address might be in next vma when migration races vma_adjust */
1008 if (first)
1009 __page_set_anon_rmap(page, vma, address, exclusive);
1010 else
1011 __page_check_anon_rmap(page, vma, address);
1012 }
1013
1014 /**
1015 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1016 * @page: the page to add the mapping to
1017 * @vma: the vm area in which the mapping is added
1018 * @address: the user virtual address mapped
1019 *
1020 * Same as page_add_anon_rmap but must only be called on *new* pages.
1021 * This means the inc-and-test can be bypassed.
1022 * Page does not have to be locked.
1023 */
1024 void page_add_new_anon_rmap(struct page *page,
1025 struct vm_area_struct *vma, unsigned long address)
1026 {
1027 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1028 SetPageSwapBacked(page);
1029 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
1030 if (PageTransHuge(page))
1031 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1032 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1033 hpage_nr_pages(page));
1034 __page_set_anon_rmap(page, vma, address, 1);
1035
1036 VM_BUG_ON_PAGE(PageLRU(page), page);
1037 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
1038 SetPageActive(page);
1039 lru_cache_add(page);
1040 return;
1041 }
1042
1043 if (!TestSetPageMlocked(page)) {
1044 /*
1045 * We use the irq-unsafe __mod_zone_page_stat because this
1046 * counter is not modified from interrupt context, and the pte
1047 * lock (a spinlock) is held, which implies preemption is disabled.
1048 */
1049 __mod_zone_page_state(page_zone(page), NR_MLOCK,
1050 hpage_nr_pages(page));
1051 count_vm_event(UNEVICTABLE_PGMLOCKED);
1052 }
1053 add_page_to_unevictable_list(page);
1054 }
1055
1056 /**
1057 * page_add_file_rmap - add pte mapping to a file page
1058 * @page: the page to add the mapping to
1059 *
1060 * The caller needs to hold the pte lock.
1061 */
1062 void page_add_file_rmap(struct page *page)
1063 {
1064 bool locked;
1065 unsigned long flags;
1066
1067 mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1068 if (atomic_inc_and_test(&page->_mapcount)) {
1069 __inc_zone_page_state(page, NR_FILE_MAPPED);
1070 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
1071 }
1072 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1073 }
1074
1075 /**
1076 * page_remove_rmap - take down pte mapping from a page
1077 * @page: page to remove mapping from
1078 *
1079 * The caller needs to hold the pte lock.
1080 */
1081 void page_remove_rmap(struct page *page)
1082 {
1083 bool anon = PageAnon(page);
1084 bool locked;
1085 unsigned long flags;
1086
1087 /*
1088 * The anon case has no mem_cgroup page_stat to update; but it may
1089 * call mem_cgroup_uncharge_page() below, where the lock ordering can
1090 * deadlock if we hold the lock against a page_stat move: so avoid it on anon.
1091 */
1092 if (!anon)
1093 mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1094
1095 /* page still mapped by someone else? */
1096 if (!atomic_add_negative(-1, &page->_mapcount))
1097 goto out;
1098
1099 /*
1100 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
1101 * and not charged by memcg for now.
1102 *
1103 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1104 * these counters are not modified in interrupt context, and
1106 * pte lock(a spinlock) is held, which implies preemption disabled.
1107 */
1108 if (unlikely(PageHuge(page)))
1109 goto out;
1110 if (anon) {
1111 mem_cgroup_uncharge_page(page);
1112 if (PageTransHuge(page))
1113 __dec_zone_page_state(page,
1114 NR_ANON_TRANSPARENT_HUGEPAGES);
1115 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1116 -hpage_nr_pages(page));
1117 } else {
1118 __dec_zone_page_state(page, NR_FILE_MAPPED);
1119 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
1120 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1121 }
1122 if (unlikely(PageMlocked(page)))
1123 clear_page_mlock(page);
1124 /*
1125 * It would be tidy to reset the PageAnon mapping here,
1126 * but that might overwrite a racing page_add_anon_rmap
1127 * which increments mapcount after us but sets mapping
1128 * before us: so leave the reset to free_hot_cold_page,
1129 * and remember that it's only reliable while mapped.
1130 * Leaving it set also helps swapoff to reinstate ptes
1131 * faster for those pages still in swapcache.
1132 */
1133 return;
1134 out:
1135 if (!anon)
1136 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1137 }
1138
1139 /*
1140 * @arg: enum ttu_flags will be passed to this argument
1141 */
1142 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1143 unsigned long address, void *arg)
1144 {
1145 struct mm_struct *mm = vma->vm_mm;
1146 pte_t *pte;
1147 pte_t pteval;
1148 spinlock_t *ptl;
1149 int ret = SWAP_AGAIN;
1150 enum ttu_flags flags = (enum ttu_flags)arg;
1151
1152 pte = page_check_address(page, mm, address, &ptl, 0);
1153 if (!pte)
1154 goto out;
1155
1156 /*
1157 * If the page is mlock()d, we cannot swap it out.
1158 * If it's recently referenced (perhaps page_referenced
1159 * skipped over this mm) then we should reactivate it.
1160 */
1161 if (!(flags & TTU_IGNORE_MLOCK)) {
1162 if (vma->vm_flags & VM_LOCKED)
1163 goto out_mlock;
1164
1165 if (flags & TTU_MUNLOCK)
1166 goto out_unmap;
1167 }
1168 if (!(flags & TTU_IGNORE_ACCESS)) {
1169 if (ptep_clear_flush_young_notify(vma, address, pte)) {
1170 ret = SWAP_FAIL;
1171 goto out_unmap;
1172 }
1173 }
1174
1175 /* Nuke the page table entry. */
1176 flush_cache_page(vma, address, page_to_pfn(page));
1177 pteval = ptep_clear_flush(vma, address, pte);
1178
1179 /* Move the dirty bit to the physical page now the pte is gone. */
1180 if (pte_dirty(pteval))
1181 set_page_dirty(page);
1182
1183 /* Update high watermark before we lower rss */
1184 update_hiwater_rss(mm);
1185
1186 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1187 if (!PageHuge(page)) {
1188 if (PageAnon(page))
1189 dec_mm_counter(mm, MM_ANONPAGES);
1190 else
1191 dec_mm_counter(mm, MM_FILEPAGES);
1192 }
1193 set_pte_at(mm, address, pte,
1194 swp_entry_to_pte(make_hwpoison_entry(page)));
1195 } else if (pte_unused(pteval)) {
1196 /*
1197 * The guest indicated that the page content is of no
1198 * interest anymore. Simply discard the pte, vmscan
1199 * will take care of the rest.
1200 */
1201 if (PageAnon(page))
1202 dec_mm_counter(mm, MM_ANONPAGES);
1203 else
1204 dec_mm_counter(mm, MM_FILEPAGES);
1205 } else if (PageAnon(page)) {
1206 swp_entry_t entry = { .val = page_private(page) };
1207 pte_t swp_pte;
1208
1209 if (PageSwapCache(page)) {
1210 /*
1211 * Store the swap location in the pte.
1212 * See handle_pte_fault() ...
1213 */
1214 if (swap_duplicate(entry) < 0) {
1215 set_pte_at(mm, address, pte, pteval);
1216 ret = SWAP_FAIL;
1217 goto out_unmap;
1218 }
1219 if (list_empty(&mm->mmlist)) {
1220 spin_lock(&mmlist_lock);
1221 if (list_empty(&mm->mmlist))
1222 list_add(&mm->mmlist, &init_mm.mmlist);
1223 spin_unlock(&mmlist_lock);
1224 }
1225 dec_mm_counter(mm, MM_ANONPAGES);
1226 inc_mm_counter(mm, MM_SWAPENTS);
1227 } else if (IS_ENABLED(CONFIG_MIGRATION)) {
1228 /*
1229 * Store the pfn of the page in a special migration
1230 * pte. do_swap_page() will wait until the migration
1231 * pte is removed and then restart fault handling.
1232 */
1233 BUG_ON(!(flags & TTU_MIGRATION));
1234 entry = make_migration_entry(page, pte_write(pteval));
1235 }
1236 swp_pte = swp_entry_to_pte(entry);
1237 if (pte_soft_dirty(pteval))
1238 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1239 set_pte_at(mm, address, pte, swp_pte);
1240 BUG_ON(pte_file(*pte));
1241 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1242 (flags & TTU_MIGRATION)) {
1243 /* Establish migration entry for a file page */
1244 swp_entry_t entry;
1245 entry = make_migration_entry(page, pte_write(pteval));
1246 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1247 } else
1248 dec_mm_counter(mm, MM_FILEPAGES);
1249
1250 page_remove_rmap(page);
1251 page_cache_release(page);
1252
1253 out_unmap:
1254 pte_unmap_unlock(pte, ptl);
1255 if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
1256 mmu_notifier_invalidate_page(mm, address);
1257 out:
1258 return ret;
1259
1260 out_mlock:
1261 pte_unmap_unlock(pte, ptl);
1262
1263
1264 /*
1265 * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
1266 * and gives an unstable result. Also, we can't wait here because
1267 * we currently hold anon_vma->rwsem or mapping->i_mmap_mutex.
1268 * If the trylock fails, the page remains on the evictable lru and
1269 * vmscan may later retry moving it to the unevictable lru if the
1270 * page is actually mlocked.
1271 */
1272 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1273 if (vma->vm_flags & VM_LOCKED) {
1274 mlock_vma_page(page);
1275 ret = SWAP_MLOCK;
1276 }
1277 up_read(&vma->vm_mm->mmap_sem);
1278 }
1279 return ret;
1280 }
1281
1282 /*
1283 * objrmap doesn't work for nonlinear VMAs because the assumption that
1284 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
1285 * Consequently, given a particular page and its ->index, we cannot locate the
1286 * ptes which are mapping that page without an exhaustive linear search.
1287 *
1288 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
1289 * maps the file to which the target page belongs. The ->vm_private_data field
1290 * holds the current cursor into that scan. Successive searches will circulate
1291 * around the vma's virtual address space.
1292 *
1293 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
1294 * more scanning pressure is placed against them as well. Eventually pages
1295 * will become fully unmapped and are eligible for eviction.
1296 *
1297 * For very sparsely populated VMAs this is a little inefficient - chances are
1298 * there won't be many ptes located within the scan cluster. In this case
1299 * maybe we could scan further - to the end of the pte page, perhaps.
1300 *
1301 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
1302 * acquire it without blocking. If vma locked, mlock the pages in the cluster,
1303 * rather than unmapping them. If we encounter the "check_page" that vmscan is
1304 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
1305 */
1306 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
1307 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
1308
1309 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1310 struct vm_area_struct *vma, struct page *check_page)
1311 {
1312 struct mm_struct *mm = vma->vm_mm;
1313 pmd_t *pmd;
1314 pte_t *pte;
1315 pte_t pteval;
1316 spinlock_t *ptl;
1317 struct page *page;
1318 unsigned long address;
1319 unsigned long mmun_start; /* For mmu_notifiers */
1320 unsigned long mmun_end; /* For mmu_notifiers */
1321 unsigned long end;
1322 int ret = SWAP_AGAIN;
1323 int locked_vma = 0;
1324
1325 address = (vma->vm_start + cursor) & CLUSTER_MASK;
1326 end = address + CLUSTER_SIZE;
1327 if (address < vma->vm_start)
1328 address = vma->vm_start;
1329 if (end > vma->vm_end)
1330 end = vma->vm_end;
1331
1332 pmd = mm_find_pmd(mm, address);
1333 if (!pmd)
1334 return ret;
1335
1336 mmun_start = address;
1337 mmun_end = end;
1338 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1339
1340 /*
1341 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
1342 * keep the sem while scanning the cluster for mlocking pages.
1343 */
1344 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1345 locked_vma = (vma->vm_flags & VM_LOCKED);
1346 if (!locked_vma)
1347 up_read(&vma->vm_mm->mmap_sem); /* don't need it */
1348 }
1349
1350 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1351
1352 /* Update high watermark before we lower rss */
1353 update_hiwater_rss(mm);
1354
1355 for (; address < end; pte++, address += PAGE_SIZE) {
1356 if (!pte_present(*pte))
1357 continue;
1358 page = vm_normal_page(vma, address, *pte);
1359 BUG_ON(!page || PageAnon(page));
1360
1361 if (locked_vma) {
1362 if (page == check_page) {
1363 /* we know we have check_page locked */
1364 mlock_vma_page(page);
1365 ret = SWAP_MLOCK;
1366 } else if (trylock_page(page)) {
1367 /*
1368 * If we can lock the page, perform mlock.
1369 * Otherwise leave the page alone, it will be
1370 * eventually encountered again later.
1371 */
1372 mlock_vma_page(page);
1373 unlock_page(page);
1374 }
1375 continue; /* don't unmap */
1376 }
1377
1378 if (ptep_clear_flush_young_notify(vma, address, pte))
1379 continue;
1380
1381 /* Nuke the page table entry. */
1382 flush_cache_page(vma, address, pte_pfn(*pte));
1383 pteval = ptep_clear_flush(vma, address, pte);
1384
1385 /* If nonlinear, store the file page offset in the pte. */
1386 if (page->index != linear_page_index(vma, address)) {
1387 pte_t ptfile = pgoff_to_pte(page->index);
1388 if (pte_soft_dirty(pteval))
1389 ptfile = pte_file_mksoft_dirty(ptfile);
1390 set_pte_at(mm, address, pte, ptfile);
1391 }
1392
1393 /* Move the dirty bit to the physical page now the pte is gone. */
1394 if (pte_dirty(pteval))
1395 set_page_dirty(page);
1396
1397 page_remove_rmap(page);
1398 page_cache_release(page);
1399 dec_mm_counter(mm, MM_FILEPAGES);
1400 (*mapcount)--;
1401 }
1402 pte_unmap_unlock(pte - 1, ptl);
1403 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1404 if (locked_vma)
1405 up_read(&vma->vm_mm->mmap_sem);
1406 return ret;
1407 }
1408
1409 static int try_to_unmap_nonlinear(struct page *page,
1410 struct address_space *mapping, void *arg)
1411 {
1412 struct vm_area_struct *vma;
1413 int ret = SWAP_AGAIN;
1414 unsigned long cursor;
1415 unsigned long max_nl_cursor = 0;
1416 unsigned long max_nl_size = 0;
1417 unsigned int mapcount;
1418
1419 list_for_each_entry(vma,
1420 &mapping->i_mmap_nonlinear, shared.nonlinear) {
1421
1422 cursor = (unsigned long) vma->vm_private_data;
1423 if (cursor > max_nl_cursor)
1424 max_nl_cursor = cursor;
1425 cursor = vma->vm_end - vma->vm_start;
1426 if (cursor > max_nl_size)
1427 max_nl_size = cursor;
1428 }
1429
1430 if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
1431 return SWAP_FAIL;
1432 }
1433
1434 /*
1435 * We don't try to search for this page in the nonlinear vmas,
1436 * and page_referenced wouldn't have found it anyway. Instead
1437 * just walk the nonlinear vmas trying to age and unmap some.
1438 * The mapcount of the page we came in with is irrelevant,
1439 * but even so use it as a guide to how hard we should try?
1440 */
1441 mapcount = page_mapcount(page);
1442 if (!mapcount)
1443 return ret;
1444
1445 cond_resched();
1446
1447 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1448 if (max_nl_cursor == 0)
1449 max_nl_cursor = CLUSTER_SIZE;
1450
1451 do {
1452 list_for_each_entry(vma,
1453 &mapping->i_mmap_nonlinear, shared.nonlinear) {
1454
1455 cursor = (unsigned long) vma->vm_private_data;
1456 while (cursor < max_nl_cursor &&
1457 cursor < vma->vm_end - vma->vm_start) {
1458 if (try_to_unmap_cluster(cursor, &mapcount,
1459 vma, page) == SWAP_MLOCK)
1460 ret = SWAP_MLOCK;
1461 cursor += CLUSTER_SIZE;
1462 vma->vm_private_data = (void *) cursor;
1463 if ((int)mapcount <= 0)
1464 return ret;
1465 }
1466 vma->vm_private_data = (void *) max_nl_cursor;
1467 }
1468 cond_resched();
1469 max_nl_cursor += CLUSTER_SIZE;
1470 } while (max_nl_cursor <= max_nl_size);
1471
1472 /*
1473 * Don't loop forever (perhaps all the remaining pages are
1474 * in locked vmas). Reset cursor on all unreserved nonlinear
1475 * vmas, now forgetting on which ones it had fallen behind.
1476 */
1477 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
1478 vma->vm_private_data = NULL;
1479
1480 return ret;
1481 }
1482
1483 bool is_vma_temporary_stack(struct vm_area_struct *vma)
1484 {
1485 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1486
1487 if (!maybe_stack)
1488 return false;
1489
1490 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1491 VM_STACK_INCOMPLETE_SETUP)
1492 return true;
1493
1494 return false;
1495 }
1496
1497 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1498 {
1499 return is_vma_temporary_stack(vma);
1500 }
1501
1502 static int page_not_mapped(struct page *page)
1503 {
1504 return !page_mapped(page);
1505 };
1506
1507 /**
1508 * try_to_unmap - try to remove all page table mappings to a page
1509 * @page: the page to get unmapped
1510 * @flags: action and flags
1511 *
1512 * Tries to remove all the page table entries which are mapping this
1513 * page, used in the pageout path. Caller must hold the page lock.
1514 * Return values are:
1515 *
1516 * SWAP_SUCCESS - we succeeded in removing all mappings
1517 * SWAP_AGAIN - we missed a mapping, try again later
1518 * SWAP_FAIL - the page is unswappable
1519 * SWAP_MLOCK - page is mlocked.
1520 */
1521 int try_to_unmap(struct page *page, enum ttu_flags flags)
1522 {
1523 int ret;
1524 struct rmap_walk_control rwc = {
1525 .rmap_one = try_to_unmap_one,
1526 .arg = (void *)flags,
1527 .done = page_not_mapped,
1528 .file_nonlinear = try_to_unmap_nonlinear,
1529 .anon_lock = page_lock_anon_vma_read,
1530 };
1531
1532 VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
1533
1534 /*
1535 * During exec, a temporary VMA is setup and later moved.
1536 * The VMA is moved under the anon_vma lock but not the
1537 * page tables leading to a race where migration cannot
1538 * find the migration ptes. Rather than increasing the
1539 * locking requirements of exec(), migration skips
1540 * temporary VMAs until after exec() completes.
1541 */
1542 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1543 rwc.invalid_vma = invalid_migration_vma;
1544
1545 ret = rmap_walk(page, &rwc);
1546
1547 if (ret != SWAP_MLOCK && !page_mapped(page))
1548 ret = SWAP_SUCCESS;
1549 return ret;
1550 }
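/*
 * Illustrative sketch of the pageout-path call (cf. shrink_page_list() in
 * mm/vmscan.c); the page is locked and the handling is abridged:
 *
 *	switch (try_to_unmap(page, ttu_flags)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 *
 * On SWAP_SUCCESS the page is fully unmapped and pageout can proceed.
 */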
1551
1552 /**
1553 * try_to_munlock - try to munlock a page
1554 * @page: the page to be munlocked
1555 *
1556 * Called from munlock code. Checks all of the VMAs mapping the page
1557 * to make sure nobody else has this page mlocked. The page will be
1558 * returned with PG_mlocked cleared if no other vmas have it mlocked.
1559 *
1560 * Return values are:
1561 *
1562 * SWAP_AGAIN - no vma is holding page mlocked, or,
1563 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
1564 * SWAP_FAIL - page cannot be located at present
1565 * SWAP_MLOCK - page is now mlocked.
1566 */
1567 int try_to_munlock(struct page *page)
1568 {
1569 int ret;
1570 struct rmap_walk_control rwc = {
1571 .rmap_one = try_to_unmap_one,
1572 .arg = (void *)TTU_MUNLOCK,
1573 .done = page_not_mapped,
1574 /*
1575 * We don't bother to try to find the munlocked page in
1576 * nonlinears. It's costly. Instead, later, page reclaim logic
1577 * may call try_to_unmap() and recover PG_mlocked lazily.
1578 */
1579 .file_nonlinear = NULL,
1580 .anon_lock = page_lock_anon_vma_read,
1581
1582 };
1583
1584 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1585
1586 ret = rmap_walk(page, &rwc);
1587 return ret;
1588 }
1589
1590 void __put_anon_vma(struct anon_vma *anon_vma)
1591 {
1592 struct anon_vma *root = anon_vma->root;
1593
1594 anon_vma_free(anon_vma);
1595 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1596 anon_vma_free(root);
1597 }
1598
1599 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1600 struct rmap_walk_control *rwc)
1601 {
1602 struct anon_vma *anon_vma;
1603
1604 if (rwc->anon_lock)
1605 return rwc->anon_lock(page);
1606
1607 /*
1608 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
1609 * because that depends on page_mapped(); but not all its usages
1610 * are holding mmap_sem. Users without mmap_sem are required to
1611 * take a reference count to prevent the anon_vma disappearing.
1612 */
1613 anon_vma = page_anon_vma(page);
1614 if (!anon_vma)
1615 return NULL;
1616
1617 anon_vma_lock_read(anon_vma);
1618 return anon_vma;
1619 }
1620
1621 /*
1622 * rmap_walk_anon - do something to anonymous page using the object-based
1623 * rmap method
1624 * @page: the page to be handled
1625 * @rwc: control variable according to each walk type
1626 *
1627 * Find all the mappings of a page using the mapping pointer and the vma chains
1628 * contained in the anon_vma struct it points to.
1629 *
1630 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1631 * where the page was found will be held for write. So, we won't recheck
1632 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1633 * LOCKED.
1634 */
1635 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1636 {
1637 struct anon_vma *anon_vma;
1638 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1639 struct anon_vma_chain *avc;
1640 int ret = SWAP_AGAIN;
1641
1642 anon_vma = rmap_walk_anon_lock(page, rwc);
1643 if (!anon_vma)
1644 return ret;
1645
1646 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1647 struct vm_area_struct *vma = avc->vma;
1648 unsigned long address = vma_address(page, vma);
1649
1650 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1651 continue;
1652
1653 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1654 if (ret != SWAP_AGAIN)
1655 break;
1656 if (rwc->done && rwc->done(page))
1657 break;
1658 }
1659 anon_vma_unlock_read(anon_vma);
1660 return ret;
1661 }
1662
1663 /*
1664 * rmap_walk_file - do something to file page using the object-based rmap method
1665 * @page: the page to be handled
1666 * @rwc: control variable according to each walk type
1667 *
1668 * Find all the mappings of a page using the mapping pointer and the vma chains
1669 * contained in the address_space struct it points to.
1670 *
1671 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1672 * where the page was found will be held for write. So, we won't recheck
1673 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1674 * LOCKED.
1675 */
1676 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1677 {
1678 struct address_space *mapping = page->mapping;
1679 pgoff_t pgoff = page->index << compound_order(page);
1680 struct vm_area_struct *vma;
1681 int ret = SWAP_AGAIN;
1682
1683 /*
1684 * The page lock not only makes sure that page->mapping cannot
1685 * suddenly be NULLified by truncation, it makes sure that the
1686 * structure at mapping cannot be freed and reused yet,
1687 * so we can safely take mapping->i_mmap_mutex.
1688 */
1689 VM_BUG_ON(!PageLocked(page));
1690
1691 if (!mapping)
1692 return ret;
1693 mutex_lock(&mapping->i_mmap_mutex);
1694 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1695 unsigned long address = vma_address(page, vma);
1696
1697 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1698 continue;
1699
1700 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1701 if (ret != SWAP_AGAIN)
1702 goto done;
1703 if (rwc->done && rwc->done(page))
1704 goto done;
1705 }
1706
1707 if (!rwc->file_nonlinear)
1708 goto done;
1709
1710 if (list_empty(&mapping->i_mmap_nonlinear))
1711 goto done;
1712
1713 ret = rwc->file_nonlinear(page, mapping, rwc->arg);
1714
1715 done:
1716 mutex_unlock(&mapping->i_mmap_mutex);
1717 return ret;
1718 }
1719
1720 int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1721 {
1722 if (unlikely(PageKsm(page)))
1723 return rmap_walk_ksm(page, rwc);
1724 else if (PageAnon(page))
1725 return rmap_walk_anon(page, rwc);
1726 else
1727 return rmap_walk_file(page, rwc);
1728 }
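/*
 * Minimal sketch of driving the walker with a custom per-pte callback;
 * my_rmap_one() and my_state are hypothetical names, not part of this file:
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long address, void *arg)
 *	{
 *		...
 *		return SWAP_AGAIN;
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one	= my_rmap_one,
 *		.arg		= &my_state,
 *		.anon_lock	= page_lock_anon_vma_read,
 *	};
 *	rmap_walk(page, &rwc);
 *
 * Returning SWAP_AGAIN from the callback keeps the walk going; any other
 * value stops it, as does rwc->done() returning true.
 */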
1729
1730 #ifdef CONFIG_HUGETLB_PAGE
1731 /*
1732 * The following three functions are for anonymous (private mapped) hugepages.
1733 * Unlike common anonymous pages, anonymous hugepages have no accounting code
1734 * and no lru code, because we handle hugepages differently from common pages.
1735 */
1736 static void __hugepage_set_anon_rmap(struct page *page,
1737 struct vm_area_struct *vma, unsigned long address, int exclusive)
1738 {
1739 struct anon_vma *anon_vma = vma->anon_vma;
1740
1741 BUG_ON(!anon_vma);
1742
1743 if (PageAnon(page))
1744 return;
1745 if (!exclusive)
1746 anon_vma = anon_vma->root;
1747
1748 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1749 page->mapping = (struct address_space *) anon_vma;
1750 page->index = linear_page_index(vma, address);
1751 }
1752
1753 void hugepage_add_anon_rmap(struct page *page,
1754 struct vm_area_struct *vma, unsigned long address)
1755 {
1756 struct anon_vma *anon_vma = vma->anon_vma;
1757 int first;
1758
1759 BUG_ON(!PageLocked(page));
1760 BUG_ON(!anon_vma);
1761 /* address might be in next vma when migration races vma_adjust */
1762 first = atomic_inc_and_test(&page->_mapcount);
1763 if (first)
1764 __hugepage_set_anon_rmap(page, vma, address, 0);
1765 }
1766
1767 void hugepage_add_new_anon_rmap(struct page *page,
1768 struct vm_area_struct *vma, unsigned long address)
1769 {
1770 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1771 atomic_set(&page->_mapcount, 0);
1772 __hugepage_set_anon_rmap(page, vma, address, 1);
1773 }
1774 #endif /* CONFIG_HUGETLB_PAGE */