/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                       inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if not, we
 * either need to find an adjacent mapping whose anon_vma we
 * can re-use (very common when the only reason for splitting
 * a vma has been mprotect()), or we allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
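
/*
 * For reference: the "common case" mentioned above is handled by the inline
 * anon_vma_prepare() wrapper in <linux/rmap.h>, which looks roughly like the
 * sketch below and only falls back to __anon_vma_prepare() when the vma has
 * no anon_vma yet (shown here for illustration only):
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */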

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy to an endless linear chain in case of
 * a constantly forking task. On the other hand, an anon_vma with more than one
 * child isn't reused even if there was no alive vma, thus the rmap walker has
 * a good chance of avoiding scanning the whole hierarchy when it searches
 * where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the batched flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
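
/*
 * Illustrative sketch (not part of this file) of how the batching above is
 * meant to be used by a reclaim-style caller: PTEs are cleared with the flush
 * deferred via set_tlb_ubc_flush_pending(), and the batch is flushed before
 * any I/O is started on the pages. The local helper name below is
 * hypothetical.
 *
 *	static void reclaim_batch_example(struct list_head *page_list)
 *	{
 *		struct page *page;
 *
 *		list_for_each_entry(page, page_list, lru)
 *			try_to_unmap(page, TTU_BATCH_FLUSH);	// may defer flush
 *
 *		// Must run before writeback touches the pages, otherwise a
 *		// remote CPU could still write through a stale, writable
 *		// TLB entry.
 *		try_to_unmap_flush_dirty();
 *	}
 */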

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}
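
/*
 * Illustrative (hypothetical) use of mm_find_pmd(): callers typically use it
 * to reach the PTE level for a known address, for example:
 *
 *	pmd_t *pmd = mm_find_pmd(mm, address);
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	if (!pmd)
 *		return;				// no stable page table here
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	// ... examine or modify *pte under the PTL ...
 *	pte_unmap_unlock(pte, ptl);
 */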

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
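
/*
 * A minimal sketch of a caller, loosely modelled on vmscan's
 * page_check_references() (the helper name and policy here are simplified
 * and hypothetical):
 *
 *	static bool example_was_referenced(struct page *page,
 *					   struct mem_cgroup *memcg)
 *	{
 *		unsigned long vm_flags;
 *		int refs;
 *
 *		refs = page_referenced(page, 0, memcg, &vm_flags);
 *		if (vm_flags & VM_LOCKED)
 *			return true;	// mlocked mappings need special care
 *		return refs > 0;
 *	}
 */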

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	int *cleaned = arg;

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;
		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret) {
			mmu_notifier_invalidate_page(vma->vm_mm, address);
			(*cleaned)++;
		}
	}

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

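/*
 * A hedged usage sketch: write-protect and clean all PTEs mapping a pagecache
 * page before deciding whether it still needs writeback, roughly as the
 * writeback path does via clear_page_dirty_for_io():
 *
 *	lock_page(page);
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	// a mapping was dirty: redirty the page
 *	unlock_page(page);
 */
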
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}
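
/*
 * Illustrative sketch (simplified, not part of this file) of where
 * page_add_new_anon_rmap() fits: the page is brand new, not yet visible to
 * any other process, and the pte lock is held while it is inserted. The
 * surrounding names follow the anonymous fault path only loosely.
 *
 *	// inside a do_anonymous_page()-style fault handler, under the ptl
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 *	set_pte_at(mm, address, pte, entry);
 */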

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_MIGRATION, page);
	}

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & TTU_MIGRATION)) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
		mmu_notifier_invalidate_page(mm, address);
	}
	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
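
/*
 * A minimal sketch of the expected calling convention (the reclaim path in
 * vmscan is the real user; error handling and most flags are omitted here,
 * and the label is hypothetical):
 *
 *	if (!trylock_page(page))
 *		return;				// caller must hold the page lock
 *	if (page_mapped(page) && !try_to_unmap(page, TTU_BATCH_FLUSH))
 *		goto keep_locked;		// some mapping could not be removed
 *	...
 *	unlock_page(page);
 */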

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
};

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code. Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}
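
/*
 * Illustrative sketch of driving the walk with a custom control structure.
 * The callback and its argument here are hypothetical; compare
 * page_referenced() and try_to_unmap() above for the real users.
 *
 *	static bool count_one(struct page *page, struct vm_area_struct *vma,
 *			      unsigned long address, void *arg)
 *	{
 *		int *nr_vmas = arg;
 *
 *		(*nr_vmas)++;
 *		return true;			// keep walking
 *	}
 *
 *	static int count_mappings(struct page *page)
 *	{
 *		int nr_vmas = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = count_one,
 *			.arg = &nr_vmas,
 *			.anon_lock = page_lock_anon_vma_read,
 *		};
 *
 *		rmap_walk(page, &rwc);		// page must be locked
 *		return nr_vmas;
 *	}
 */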

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */