/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
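/*
 * For example (a sketch only; the real accessors live in linux/mm.h), on
 * configurations that keep the node id in page->flags, page_to_nid()
 * reads it out of the fields area as
 *
 *	(page->flags >> NODES_PGSHIFT) & NODES_MASK
 *
 * while the PageFoo accessors below operate on the flags area via
 * test_bit() and friends.
 */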
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_mappedtodisk,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#define	PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
			 hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
				   &hugetlb_optimize_vmemmap_key);
}

/*
 * If the feature of optimizing vmemmap pages associated with each HugeTLB
 * page is enabled, the head vmemmap page frame is reused and all of the tail
 * vmemmap addresses map to the head vmemmap page frame (further details can
 * be found in the figure at the head of mm/hugetlb_vmemmap.c). In other
 * words, there is more than one page struct with PG_head associated with each
 * HugeTLB page. We __know__ that there is only one head page struct, the tail
 * page structs with PG_head are fake head page structs. We need an approach
 * to distinguish between those two different types of page structs so that
 * compound_head() can return the real head page struct when the parameter is
 * the tail page struct but with PG_head.
 *
 * The page_fixed_fake_head() returns the real head page struct if the @page is
 * a fake page head, otherwise, returns @page which can either be a true page
 * head or tail.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!hugetlb_optimize_vmemmap_enabled())
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page. The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of the @page[1]. It can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}

static inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return false;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))

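/*
 * Illustrative note (not new API): a tail page stores a pointer to its head
 * page in ->compound_head with bit 0 set as a tag, which is why
 * _compound_head() returns (head - 1) once the low bit tests true, e.g.:
 *
 *	struct page *head = compound_head(page);	// == page unless tail
 */
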
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)

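/*
 * Usage sketch (illustrative): converting between the two views is cheap
 * and needs no reference:
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *first = folio_page(folio, 0);	// == &folio->page
 */
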
static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for a compound page, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for a compound page, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

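/*
 * For example (illustrative expansion of the generators above),
 * PAGEFLAG(Dirty, dirty, PF_HEAD) produces:
 *
 *	static __always_inline bool folio_test_dirty(struct folio *folio)
 *	{ return test_bit(PG_dirty, folio_flags(folio, FOLIO_PF_HEAD)); }
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * together with the matching folio_set_dirty()/SetPageDirty() and
 * folio_clear_dirty()/ClearPageDirty() pairs.
 */
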
#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page; in that case page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

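/*
 * Summarised (an illustrative decoding table, not new API), the low bits of
 * page->mapping encode:
 *
 *	anon_vma      | PAGE_MAPPING_ANON	anonymous page
 *	stable node   | PAGE_MAPPING_KSM	KSM page (ANON | MOVABLE)
 *	address_space | PAGE_MAPPING_MOVABLE	non-lru movable page
 *	address_space | (no low bits)		ordinary pagecache page
 */
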
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

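/*
 * Barrier pairing, sketched (illustrative only): the smp_wmb() in
 * folio_mark_uptodate() pairs with the smp_rmb() in folio_test_uptodate(),
 * so a reader that observes the uptodate bit also observes the data the
 * writer published before setting it:
 *
 *	writer:	fill folio contents
 *		folio_mark_uptodate(folio);
 *	reader:	if (folio_test_uptodate(folio))
 *			the folio contents may be read safely
 */
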
bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last
 * compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
PAGEFLAG_FALSE(DoubleMap, double_map)
	TESTSCFLAG_FALSE(DoubleMap, double_map)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

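/*
 * Worked example (illustrative): page_type starts at -1 (0xffffffff), so no
 * type matches. __SetPageBuddy() clears PG_buddy, giving 0xffffff7f, and
 * PageType(page, PG_buddy) then evaluates
 *
 *	(0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *	(0xffffff7f & 0xf0000080)                  == 0xf0000000	-> true
 *
 * whereas on an untyped page the PG_buddy bit is still set, the left-hand
 * side is 0xf0000080, and the test is false.
 */
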
#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require re-setting the pages PageOffline() and not giving them
 * to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent reuse of the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(PAGEFLAGS_MASK & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */