/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on the pages. The
 * struct page (which carries these flag bits) is always mapped into kernel
 * address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

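/*
 * Illustrative sketch (not part of the original header): how the bits above
 * interact on a typical page-cache read, using the usual helpers built on
 * these flags (lock_page(), SetPageUptodate(), unlock_page() live elsewhere
 * in the kernel):
 *
 *	lock_page(page);	- sets PG_locked and pins the page
 *	... submit read I/O ...
 *	SetPageUptodate(page);	- read completed, contents now valid
 *	unlock_page(page);	- clears PG_locked, wakes page_waitqueue(page)
 */
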
/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1                 ^ 0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
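/*
 * Illustrative note (not part of the original header): the FIELD part is
 * read with accessors defined in <linux/mm.h>, e.g. page_zonenum(page) and
 * page_to_nid(page), while the PG_* accessors generated below operate only
 * on the low FLAGS bits.
 */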
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

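/*
 * Added note (not in the original header): tail pages store the address of
 * their head page in page->compound_head with bit 0 set (see
 * set_compound_head() below), so compound_head() simply clears that bit to
 * recover the head, and PageTail() tests it. For a non-tail page the field
 * is 0 and the page is returned unchanged.
 */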
static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		page;})
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

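/*
 * Added example (not in the original header): with the policies and macros
 * above, an invocation such as PAGEFLAG(Dirty, dirty, PF_HEAD) expands to
 * roughly
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. the PF_HEAD policy redirects every operation to the head page of a
 * compound page.
 */
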
#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

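/*
 * Added note (not in the original header): PG_swapcache aliases
 * PG_owner_priv_1 (see enum pageflags above), so the bit is only meaningful
 * on pages that are also PageSwapBacked(); PageSwapCache() below therefore
 * checks both.
 */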
#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

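/*
 * Added example (not in the original header): since the low two bits of
 * page->mapping are used as type tags, the real pointer behind an anonymous
 * page is recovered by masking them off, e.g.
 *
 *	struct anon_vma *av = (struct anon_vma *)
 *		((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 *
 * which is what the rmap code does; the helpers below only inspect the tag
 * bits themselves.
 */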
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;

	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

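/*
 * Added note (not in the original header): the smp_wmb() in SetPageUptodate()
 * pairs with the smp_rmb() in PageUptodate() above. A writer fills the page
 * and then calls SetPageUptodate(); a reader that observes PageUptodate()
 * returning true is therefore guaranteed to also observe the page contents
 * that were written before the flag was set.
 */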
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be
 * called only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * When PageDoubleMap is set, ->_mapcount in all sub-pages is offset up by
 * one. This additional reference goes away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace, page->mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better to start not too close
 * to -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a special page.
 */
#define PAGE_MAPCOUNT_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return atomic_read(&page->_mapcount) ==				\
				PAGE_##lname##_MAPCOUNT_VALUE;		\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	atomic_set(&page->_mapcount, -1);				\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE	(-128)
PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

/*
 * PageBalloon() is set on pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
#define PAGE_BALLOON_MAPCOUNT_VALUE	(-256)
PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
#define PAGE_KMEMCG_MAPCOUNT_VALUE	(-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)

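/*
 * Added example (not in the original header): PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
 * above expands to roughly
 *
 *	static __always_inline int PageBuddy(struct page *page)
 *	{
 *		return atomic_read(&page->_mapcount) ==
 *					PAGE_BUDDY_MAPCOUNT_VALUE;
 *	}
 *
 * plus the matching __SetPageBuddy()/__ClearPageBuddy(), i.e. the "flag" is
 * encoded as a special negative _mapcount value rather than a PG_* bit.
 */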
extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */