#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for cleaning up the
         * anon_vma if it is the last user on release.
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used when deciding whether to reuse an anon_vma
         * instead of forking a new one. See the comments in anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct rb_root rb_root; /* Interval tree of private "related" vmas */
};

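/*
 * Illustrative sketch (not part of this header): the refcount pattern
 * described above. A caller that cannot guarantee that the vma (and so
 * the anon_vma) stays alive pins the anon_vma first and drops the pin
 * when done; the final put frees the structure. example_inspect_anon()
 * is a hypothetical helper, shown only to tie the pieces together.
 */
#if 0
static void example_inspect_anon(struct page *page)
{
        struct anon_vma *anon_vma;

        anon_vma = page_get_anon_vma(page);     /* takes a refcount, or NULL */
        if (!anon_vma)
                return;                 /* page is not (or no longer) anon */

        anon_vma_lock_read(anon_vma);
        /* ... the rb_root interval tree is stable here ... */
        anon_vma_unlock_read(anon_vma);

        put_anon_vma(anon_vma);         /* frees the anon_vma if last user */
}
#endif
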
/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field links the anon_vma_chains into an interval tree
 * that indexes all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};

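/*
 * Illustrative sketch (not part of this header): the chain is indexed
 * from both sides. From a vma, the same_vma list enumerates every
 * anon_vma the vma is linked into; from an anon_vma, the rb interval
 * tree enumerates the vmas. example_for_each_anon_vma() is hypothetical
 * and assumes mmap_sem is held for the list walk.
 */
#if 0
static void example_for_each_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) {
                /* avc->anon_vma is one anon_vma this vma is linked into */
        }
}
#endif
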
enum ttu_flags {
        TTU_UNMAP = 1,                  /* unmap mode */
        TTU_MIGRATION = 2,              /* migration mode */
        TTU_MUNLOCK = 4,                /* munlock mode */
        TTU_LZFREE = 8,                 /* lazy free mode */
        TTU_SPLIT_HUGE_PMD = 16,        /* split huge PMD if any */

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
        TTU_BATCH_FLUSH = (1 << 11),    /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED = (1 << 12)     /* do not grab rmap lock:
                                         * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}

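/*
 * Illustrative sketch (not part of this header): all four helpers above
 * take anon_vma->root->rwsem, so a single lock covers the whole
 * anon_vma tree created by fork/split. example_modify_tree() is a
 * hypothetical write-side user.
 */
#if 0
static void example_modify_tree(struct anon_vma *anon_vma)
{
        anon_vma_lock_write(anon_vma);  /* root rwsem, exclusive */
        /* ... link or unlink anon_vma_chains in anon_vma->rb_root ... */
        anon_vma_unlock_write(anon_vma);
}
#endif
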
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))
                return 0;

        return __anon_vma_prepare(vma);
}

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                        unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

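/*
 * Illustrative sketch (not part of this header): do_page_add_anon_rmap()
 * takes the RMAP_* bitflags defined above, e.g. for a pte known to be
 * exclusive to this process. example_add_exclusive() is hypothetical.
 */
#if 0
static void example_add_exclusive(struct page *page,
                                  struct vm_area_struct *vma,
                                  unsigned long address)
{
        do_page_add_anon_rmap(page, vma, address, RMAP_EXCLUSIVE);
}
#endif
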
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);

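/*
 * Illustrative sketch (not part of this header): ttu_flags combine as a
 * bitmask, e.g. reclaim-style unmapping with batched TLB flushing (the
 * caller then owes the final flush). The return value is one of the
 * SWAP_* codes defined at the end of this file.
 */
#if 0
static int example_unmap_for_reclaim(struct page *page)
{
        return try_to_unmap(page, TTU_UNMAP | TTU_BATCH_FLUSH);
}
#endif
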
/*
 * Used by uprobes to replace a userspace page safely
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                            unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}

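/*
 * Illustrative sketch (not part of this header): on success the returned
 * pte is mapped and *ptlp is locked; the caller must release both, e.g.
 * with pte_unmap_unlock(). example_page_has_pte() is hypothetical.
 */
#if 0
static bool example_page_has_pte(struct page *page, struct mm_struct *mm,
                                 unsigned long address)
{
        spinlock_t *ptl;
        pte_t *pte = page_check_address(page, mm, address, &ptl, 0);

        if (!pte)
                return false;
        /* ... inspect or modify the pte under ptl ... */
        pte_unmap_unlock(pte, ptl);
        return true;
}
#endif
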
/*
 * Used by idle page tracking to check if a page was referenced via page
 * tables.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
                                  unsigned long address, pmd_t **pmdp,
                                  pte_t **ptep, spinlock_t **ptlp);
#else
static inline bool page_check_address_transhuge(struct page *page,
                                struct mm_struct *mm, unsigned long address,
                                pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
{
        *ptep = page_check_address(page, mm, address, ptlp, 0);
        *pmdp = NULL;
        return !!*ptep;
}
#endif

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

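/*
 * Illustrative sketch (not part of this header), modelled on the
 * writeback-side pattern in clear_page_dirty_for_io(): if any pte was
 * still dirty, transfer that dirtiness back to the page.
 */
#if 0
static void example_mkclean(struct page *page)
{
        if (page_mkclean(page))
                set_page_dirty(page);
}
#endif
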
/*
 * Called from the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: to control the rmap traversal for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where the page is mapped
 * done: checks whether the traversal should terminate
 * anon_lock: takes the anon_vma lock in an optimized way rather than
 *            the default
 * invalid_vma: used to skip vmas that are not of interest
 */
struct rmap_walk_control {
        void *arg;
        int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

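/*
 * Illustrative sketch (not part of this header): a minimal walk that
 * counts the vmas mapping a page. example_count_one() is hypothetical;
 * returning SWAP_AGAIN keeps the traversal going.
 */
#if 0
static int example_count_one(struct page *page, struct vm_area_struct *vma,
                             unsigned long addr, void *arg)
{
        (*(int *)arg)++;
        return SWAP_AGAIN;
}

static int example_count_mappings(struct page *page)
{
        int count = 0;
        struct rmap_walk_control rwc = {
                .arg = &count,
                .rmap_one = example_count_one,
        };

        rmap_walk(page, &rwc);
        return count;
}
#endif
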
#else   /* !CONFIG_MMU */

#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}


#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS 0
#define SWAP_AGAIN 1
#define SWAP_FAIL 2
#define SWAP_MLOCK 3
#define SWAP_LZFREE 4

#endif  /* _LINUX_RMAP_H */