/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on the pages. The
 * struct page itself (which carries these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		page;})
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

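/*
 * For illustration (a sketch of the generated code, not additional API):
 * PAGEFLAG(Dirty, dirty, PF_HEAD) below expands, roughly, to three inline
 * accessors in which the PF_HEAD policy redirects every operation to the
 * compound head page:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 */
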
#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

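/*
 * For illustration (a summary of the comment above, not additional API),
 * the low two bits of page->mapping distinguish what the pointer refers to:
 *
 *	00 - struct address_space	(ordinary pagecache page)
 *	01 - anon_vma			(PAGE_MAPPING_ANON)
 *	10 - struct address_space	(non-lru movable page, __PageMovable() below)
 *	11 - KSM private structure	(PAGE_MAPPING_KSM, see ksm.h)
 */
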
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

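/*
 * For illustration (a hypothetical lowmem read-completion path, not code
 * from this header), the barrier pairing described above looks like:
 *
 *	// producer: fill the page, then publish it
 *	memcpy(page_address(page), buf, PAGE_SIZE);
 *	SetPageUptodate(page);		// smp_wmb(), then set PG_uptodate
 *
 *	// consumer: observe the flag, then read the data
 *	if (PageUptodate(page))		// test PG_uptodate, then smp_rmb()
 *		copy_from_page(page);	// sees the fully written contents
 *
 * copy_from_page() here is just a placeholder for whatever reads the data.
 */
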
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return 0;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can postpone
 * per small page mapcount accounting (and its overhead from atomic operations)
 * until the first PMD split.
 *
 * For a page with PageDoubleMap set, ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace, page->_mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better to start not too close
 * to -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a special page.
 */
#define PAGE_MAPCOUNT_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return atomic_read(&page->_mapcount) ==				\
				PAGE_##lname##_MAPCOUNT_VALUE;		\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	atomic_set(&page->_mapcount, -1);				\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE		(-128)
PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

/*
 * PageBalloon() is set on pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
#define PAGE_BALLOON_MAPCOUNT_VALUE		(-256)
PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
#define PAGE_KMEMCG_MAPCOUNT_VALUE		(-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)

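/*
 * For illustration (a hypothetical use of the generated helpers above, not
 * additional API): a page that is neither mapped nor specially typed has
 * page->_mapcount == -1, and the sentinel values simply replace it:
 *
 *	__SetPageBuddy(page);		// _mapcount: -1 -> -128
 *	PageBuddy(page);		// true  (== PAGE_BUDDY_MAPCOUNT_VALUE)
 *	PageBalloon(page);		// false (-128 != -256)
 *	__ClearPageBuddy(page);		// _mapcount: -128 -> -1
 */
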
extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1UL << PG_lru	 | 1UL << PG_locked    | \
	 1UL << PG_private | 1UL << PG_private_2 | \
	 1UL << PG_writeback | 1UL << PG_reserved | \
	 1UL << PG_slab	 | 1UL << PG_active | \
	 1UL << PG_unevictable | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */