/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing IO on the pages. The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since that may cause another machine check. Don't touch!
 */
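
/*
 * Illustrative sketch (not part of this header): how the bits above
 * typically interact on a buffered read.  lock_page()/unlock_page()
 * are assumed from <linux/pagemap.h>; the real call sites live in the
 * filesystem and mm code, so treat this as a lifecycle sketch only.
 */
#if 0	/* usage sketch, never compiled */
static void example_read_lifecycle(struct page *page)
{
	lock_page(page);	/* PG_locked set: page pinned, truncation blocked */
	/* ... submit the read I/O; the completion handler then does: */
	SetPageUptodate(page);	/* contents are now valid */
	unlock_page(page);	/* PG_locked cleared, page_waitqueue() woken */
}
#endif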

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *                (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
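
/*
 * Illustrative sketch (not part of this header): bits in the flags
 * area are tested directly, while the fields area is decoded through
 * helpers such as page_to_nid() and page_zonenum(), which are assumed
 * here from <linux/mm.h>:
 */
#if 0	/* usage sketch, never compiled */
static int example_flags_vs_fields(struct page *page)
{
	if (test_bit(PG_dirty, &page->flags))	/* low bits: flags area */
		return -1;
	/* high bits: node and zone fields, decoded by the mm.h helpers */
	return page_to_nid(page) + page_zonenum(page);
}
#endif
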
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	PG_head,		/* A head page */
	PG_tail,		/* A tail page */
#else
	PG_compound,		/* A compound page */
#endif
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,
};

#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname) \
static inline int Page##uname(const struct page *page) \
		{ return test_bit(PG_##lname, &page->flags); }

#define SETPAGEFLAG(uname, lname) \
static inline void SetPage##uname(struct page *page) \
		{ set_bit(PG_##lname, &page->flags); }

#define CLEARPAGEFLAG(uname, lname) \
static inline void ClearPage##uname(struct page *page) \
		{ clear_bit(PG_##lname, &page->flags); }

#define __SETPAGEFLAG(uname, lname) \
static inline void __SetPage##uname(struct page *page) \
		{ __set_bit(PG_##lname, &page->flags); }

#define __CLEARPAGEFLAG(uname, lname) \
static inline void __ClearPage##uname(struct page *page) \
		{ __clear_bit(PG_##lname, &page->flags); }

#define TESTSETFLAG(uname, lname) \
static inline int TestSetPage##uname(struct page *page) \
		{ return test_and_set_bit(PG_##lname, &page->flags); }

#define TESTCLEARFLAG(uname, lname) \
static inline int TestClearPage##uname(struct page *page) \
		{ return test_and_clear_bit(PG_##lname, &page->flags); }

#define __TESTCLEARFLAG(uname, lname) \
static inline int __TestClearPage##uname(struct page *page) \
		{ return __test_and_clear_bit(PG_##lname, &page->flags); }

#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define TESTSCFLAG(uname, lname) \
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)

#define TESTPAGEFLAG_FALSE(uname) \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname) \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname) \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname) \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname) \
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname) \
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

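/*
 * For reference, PAGEFLAG(Dirty, dirty) below expands to exactly these
 * three helpers (shown pre-expanded here for illustration only):
 *
 *	static inline int PageDirty(const struct page *page)
 *		{ return test_bit(PG_dirty, &page->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &page->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &page->flags); }
 */
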
struct page;	/* forward declaration */

TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
	__SETPAGEFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
	TESTCLEARFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked)		/* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
PAGEFLAG(SavePinned, savepinned)	/* Xen */
PAGEFLAG(Foreign, foreign)		/* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
	__CLEARPAGEFLAG(Private, private)
PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem(page_zone(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
	TESTCLEARFLAG(Unevictable, unevictable)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif
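
/*
 * Illustrative sketch (not part of this header): classifying a page by
 * the mapping bits above.  The real discriminators are PageAnon() and
 * PageKsm(); this merely spells out what each combination means:
 */
#if 0	/* usage sketch, never compiled */
static const char *example_classify(struct page *page)
{
	if (PageKsm(page))	/* ANON | KSM: stable tree node */
		return "ksm";
	if (PageAnon(page))	/* ANON alone: anon_vma pointer */
		return "anon";
	return "file or none";	/* plain address_space or NULL */
}
#endif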

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);

	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate)

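/*
 * Illustrative sketch (not part of this header): the smp_wmb() in
 * SetPageUptodate() pairs with the smp_rmb() in PageUptodate(), so a
 * reader that sees the bit set also sees the page contents.  kaddr is
 * assumed to be a kernel mapping of the page:
 */
#if 0	/* usage sketch, never compiled */
static int example_uptodate_reader(struct page *page, const char *kaddr)
{
	if (!PageUptodate(page))	/* smp_rmb() taken when bit is set */
		return -EIO;
	return kaddr[0];	/* safe: stores before the smp_wmb() are visible */
}
#endif
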
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page) \
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page) \
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
 * System with lots of page flags available. This allows separate
 * flags for PageHead() and PageTail() checks of compound pages so that bit
 * tests can be used in performance sensitive paths. PageCompound is
 * generally not used in hot code paths except arch/powerpc/mm/init_64.c
 * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
 * and avoid handling those in real mode.
 */
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
__PAGEFLAG(Tail, tail)

static inline int PageCompound(struct page *page)
{
	return page->flags & ((1L << PG_head) | (1L << PG_tail));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))

#else
/*
 * Reduce page flag use as much as possible by overlapping
 * compound page flags with the flags used for page cache pages. Possible
 * because PageCompound is always set for compound pages and not for
 * pages on the LRU and/or pagecache.
 */
TESTPAGEFLAG(Compound, compound)
__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)

/*
 * PG_reclaim is used in combination with PG_compound to mark the
 * head and tail of a compound page. This saves one page flag
 * but makes it impossible to use compound pages for the page cache.
 * The PG_reclaim bit would have to be used for reclaim or readahead
 * if compound pages enter the page cache.
 *
 * PG_compound & PG_reclaim	=> Tail page
 * PG_compound & ~PG_reclaim	=> Head page
 */
#define PG_head_mask ((1L << PG_compound))
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))

static inline int PageHead(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_mask);
}

static inline int PageTail(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
}

static inline void __SetPageTail(struct page *page)
{
	page->flags |= PG_head_tail_mask;
}

static inline void __ClearPageTail(struct page *page)
{
	page->flags &= ~PG_head_tail_mask;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
	clear_bit(PG_compound, &page->flags);
}
#endif

#endif /* !PAGEFLAGS_EXTENDED */

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

#else

static inline int PageTransHuge(struct page *page)
{
	return 0;
}

static inline int PageTransCompound(struct page *page)
{
	return 0;
}

static inline int PageTransTail(struct page *page)
{
	return 0;
}
#endif

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

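/*
 * Illustrative sketch (not part of this header): how an allocator-style
 * path would mark and unmark a free page, assuming the caller holds
 * whatever locking the real mm/page_alloc.c paths rely on:
 */
#if 0	/* usage sketch, never compiled */
static void example_buddy_mark(struct page *page)
{
	__SetPageBuddy(page);	/* _mapcount: -1 -> PAGE_BUDDY_MAPCOUNT_VALUE */
	BUG_ON(!PageBuddy(page));
	__ClearPageBuddy(page);	/* back to the default -1 */
}
#endif
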
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
#else
#define __PG_COMPOUND_LOCK	0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru | 1 << PG_locked | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
	 __PG_COMPOUND_LOCK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have any flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)

#define PAGE_FLAGS_PRIVATE \
	(1 << PG_private | 1 << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

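/*
 * Illustrative sketch (not part of this header): reclaim-style code
 * checks page_has_private() before asking the filesystem to drop its
 * private data; try_to_release_page() is assumed from the core mm
 * headers:
 */
#if 0	/* usage sketch, never compiled */
static int example_try_release(struct page *page)
{
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return 0;	/* the fs refused to drop its private data */
	return 1;
}
#endif
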
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */