#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;	/* Root of this anon_vma tree */
	spinlock_t lock;	/* Serialize access to vma list */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)

	/*
	 * The external_refcount is taken by either KSM or page migration
	 * to take a reference to an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t external_refcount;
#endif
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* Chain of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
};
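
/*
 * Illustrative sketch (not part of the original header): walking both
 * chains. The do_something_with() helpers are hypothetical; the caller
 * is assumed to hold mmap_sem for same_vma and anon_vma->lock for
 * same_anon_vma, per the locking comments above.
 */
#if 0	/* example only */
static void example_walk_chains(struct vm_area_struct *vma,
				struct anon_vma *anon_vma)
{
	struct anon_vma_chain *avc;

	/* Every anon_vma this VMA participates in: */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		do_something_with(avc->anon_vma);

	/* Every VMA that may map pages of this anon_vma: */
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
		do_something_with(avc->vma);
}
#endif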

#ifdef CONFIG_MMU
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
	atomic_set(&anon_vma->external_refcount, 0);
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	return atomic_read(&anon_vma->external_refcount);
}

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->external_refcount);
}

void drop_anon_vma(struct anon_vma *);
#else
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	return 0;
}

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
}

static inline void drop_anon_vma(struct anon_vma *anon_vma)
{
}
#endif /* CONFIG_KSM || CONFIG_MIGRATION */
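
/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of the helpers above, as a KSM- or migration-style caller
 * might use them. operate_on_pages() is a hypothetical placeholder
 * for work done while the vma itself may be torn down.
 */
#if 0	/* example only */
static void example_pin_anon_vma(struct anon_vma *anon_vma)
{
	get_anon_vma(anon_vma);		/* take the external reference */
	operate_on_pages(anon_vma);	/* vma/page tables may go away */
	drop_anon_vma(anon_vma);	/* last external user frees it */
}
#endif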

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->root->lock);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->root->lock);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	spin_lock(&anon_vma->root->lock);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->root->lock);
}
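
/*
 * Illustrative sketch (not part of the original header): all four
 * helpers above funnel to the root anon_vma's lock, so an edit to any
 * anon_vma list in the tree is serialized the same way. The list
 * manipulation shown is a hypothetical stand-in.
 */
#if 0	/* example only */
static void example_link(struct vm_area_struct *vma,
			 struct anon_vma_chain *avc)
{
	vma_lock_anon_vma(vma);		/* spin_lock(&...->root->lock) */
	list_add_tail(&avc->same_anon_vma, &vma->anon_vma->head);
	vma_unlock_anon_vma(vma);
}
#endif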

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}
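
/*
 * Illustrative sketch (not part of the original header): how the two
 * copy paths above differ. A split or copy within one mm shares the
 * existing anon_vmas via anon_vma_clone(); fork additionally gives the
 * child its own anon_vma via anon_vma_fork().
 */
#if 0	/* example only */
static int example_dup(struct vm_area_struct *new,
		       struct vm_area_struct *old, int forking)
{
	if (forking)
		return anon_vma_fork(new, old);	/* child gets own anon_vma */
	return anon_vma_clone(new, old);	/* share old's anon_vmas */
}
#endif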

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
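
/*
 * Illustrative sketch (not part of the original header): the usual
 * anonymous-fault ordering, loosely modelled on do_anonymous_page();
 * page allocation and pte setup are elided.
 */
#if 0	/* example only */
static int example_anon_fault(struct vm_area_struct *vma,
			      unsigned long address, struct page *page)
{
	if (unlikely(anon_vma_prepare(vma)))	/* ensure vma->anon_vma */
		return -ENOMEM;
	/* ... allocate and zero the page, build the pte ... */
	page_add_new_anon_rmap(page, vma, address);	/* first mapping */
	return 0;
}
#endif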

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags);

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
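
/*
 * Illustrative sketch (not part of the original header): an action in
 * the low byte combined with modifier bits above it, in the way the
 * TTU_ACTION() macro expects to recover the action.
 */
#if 0	/* example only */
static int example_unmap(struct page *page)
{
	enum ttu_flags flags = TTU_UNMAP | TTU_IGNORE_MLOCK;

	/* TTU_ACTION(flags) strips the modifiers, leaving TTU_UNMAP. */
	return try_to_unmap(page, flags);
}
#endif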

bool is_vma_temporary_stack(struct vm_area_struct *vma);

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
		     unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
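
/*
 * Illustrative sketch (not part of the original header): the usual
 * caller pattern: on success the page table lock is held, and must be
 * dropped with pte_unmap_unlock() once the pte has been examined.
 */
#if 0	/* example only */
static int example_is_mapped_at(struct page *page, struct mm_struct *mm,
				unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);

	if (!pte)
		return 0;
	pte_unmap_unlock(pte, ptl);	/* release the lock taken above */
	return 1;
}
#endif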

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *__page_lock_anon_vma(struct page *page);

static inline struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;

	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(&anon_vma->root->lock, anon_vma);

	return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
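
/*
 * Illustrative sketch (not part of the original header): the calls
 * must pair, and the caller must tolerate a NULL return, since the
 * page may no longer be anonymous by the time it is examined.
 */
#if 0	/* example only */
static void example_scan_anon_page(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma(page);

	if (!anon_vma)
		return;		/* not anon, or anon_vma already freed */
	/* ... walk anon_vma->head under the root lock ... */
	page_unlock_anon_vma(anon_vma);
}
#endif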

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
	      struct vm_area_struct *, unsigned long, void *), void *arg);
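
/*
 * Illustrative sketch (not part of the original header): a minimal
 * callback of the shape rmap_walk() expects. Returning SWAP_AGAIN
 * (defined at the end of this header) tells the walk to continue.
 */
#if 0	/* example only */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	(*(int *)arg)++;	/* e.g. count the mappings visited */
	return SWAP_AGAIN;	/* keep walking */
}

static int example_count_mappings(struct page *page)
{
	int count = 0;

	rmap_walk(page, example_rmap_one, &count);
	return count;
}
#endif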

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
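
/*
 * Illustrative sketch (not part of the original header): how a
 * reclaim-style caller might dispatch on these values.
 */
#if 0	/* example only */
static void example_handle_unmap(struct page *page)
{
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_SUCCESS:	/* all ptes gone; page can be reclaimed */
		break;
	case SWAP_AGAIN:	/* transient failure; retry later */
		break;
	case SWAP_FAIL:		/* permanent failure; keep the page */
		break;
	case SWAP_MLOCK:	/* page is mlocked; move to unevictable */
		break;
	}
}
#endif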

#endif /* _LINUX_RMAP_H */