/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). Private allocations may
 * also use it for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing IO on the pages. The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
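
/*
 * Illustrative sketch (an added example, not part of the original header):
 * a pagecache read path uses PG_locked and PG_uptodate roughly like this,
 * with lock_page()/unlock_page() from <linux/pagemap.h>:
 *
 *	lock_page(page);			// take PG_locked, may sleep
 *	if (PageUptodate(page)) {
 *		unlock_page(page);		// clear PG_locked, wake waiters
 *		return;				// data already valid
 *	}
 *	mapping->a_ops->readpage(file, page);	// start I/O; completion sets
 *						// PG_uptodate and unlocks
 */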

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
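
/*
 * Illustrative sketch (an added example, not part of the original header):
 * the fields area is decoded by shift-and-mask accessors in <linux/mm.h>,
 * conceptually:
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	node = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *
 * (see page_zonenum() and page_to_nid() for the real versions)
 */
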
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	PG_head,		/* A head page */
	PG_tail,		/* A tail page */
#else
	PG_compound,		/* A compound page */
#endif
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,
};

#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname)					\
static inline int Page##uname(const struct page *page)			\
			{ return test_bit(PG_##lname, &page->flags); }

#define SETPAGEFLAG(uname, lname)					\
static inline void SetPage##uname(struct page *page)			\
			{ set_bit(PG_##lname, &page->flags); }

#define CLEARPAGEFLAG(uname, lname)					\
static inline void ClearPage##uname(struct page *page)			\
			{ clear_bit(PG_##lname, &page->flags); }

#define __SETPAGEFLAG(uname, lname)					\
static inline void __SetPage##uname(struct page *page)			\
			{ __set_bit(PG_##lname, &page->flags); }

#define __CLEARPAGEFLAG(uname, lname)					\
static inline void __ClearPage##uname(struct page *page)		\
			{ __clear_bit(PG_##lname, &page->flags); }

#define TESTSETFLAG(uname, lname)					\
static inline int TestSetPage##uname(struct page *page)		\
		{ return test_and_set_bit(PG_##lname, &page->flags); }

#define TESTCLEARFLAG(uname, lname)					\
static inline int TestClearPage##uname(struct page *page)		\
		{ return test_and_clear_bit(PG_##lname, &page->flags); }

#define __TESTCLEARFLAG(uname, lname)					\
static inline int __TestClearPage##uname(struct page *page)		\
		{ return __test_and_clear_bit(PG_##lname, &page->flags); }

#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define TESTSCFLAG(uname, lname)					\
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname)					\
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

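/*
 * Illustrative expansion (an added example, not part of the original
 * header): PAGEFLAG(Dirty, dirty) below generates exactly these three
 * helpers:
 *
 *	static inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &page->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &page->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &page->flags); }
 *
 * set_bit()/clear_bit() are atomic RMW operations; the __SetPage* and
 * __ClearPage* variants use the non-atomic __set_bit()/__clear_bit() and
 * are only safe when no other context can touch page->flags, e.g. on a
 * freshly allocated page that is not yet visible to anyone else.
 */
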
struct page;	/* forward declaration */

TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
	__SETPAGEFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
	TESTCLEARFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked)		/* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
PAGEFLAG(SavePinned, savepinned)	/* Xen */
PAGEFLAG(Foreign, foreign)		/* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
	__CLEARPAGEFLAG(Private, private)
PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)

/*
 * Only test-and-set operations exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem(page_zone(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
	TESTCLEARFLAG(Unevictable, unevictable)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young)
SETPAGEFLAG(Young, young)
TESTCLEARFLAG(Young, young)
PAGEFLAG(Idle, idle)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
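
/*
 * Illustrative sketch (an added example, not part of the original header):
 * because the low two bits of page->mapping are flag bits, code that wants
 * the real address_space has to treat them specially, conceptually:
 *
 *	struct address_space *mapping = page->mapping;
 *
 *	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 *		mapping = NULL;		// anon or KSM: no address_space
 *	// otherwise the low bits are clear and mapping can be used as-is
 *
 * (the real page_mapping() in mm/util.c additionally handles swapcache
 * pages, whose mapping lives in the swap address space)
 */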

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);

	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	/* Same barrier pairing as SetPageUptodate(), but non-atomic set. */
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
}
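
/*
 * Illustrative pairing sketch (an added example, not part of the original
 * header): the smp_wmb() in SetPageUptodate() pairs with the smp_rmb() in
 * PageUptodate(), so a consumer never sees PG_uptodate set without also
 * seeing the page contents written before it:
 *
 *	// producer (e.g. on read completion)
 *	memcpy(page_address(page), buf, PAGE_SIZE);	// fill the page
 *	SetPageUptodate(page);		// smp_wmb(), then set the bit
 *
 *	// consumer
 *	if (PageUptodate(page))		// test the bit, then smp_rmb()
 *		consume(page_address(page));	// guaranteed to see the data
 *
 * (consume() is a hypothetical stand-in for whatever uses the contents)
 */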

CLEARPAGEFLAG(Uptodate, uptodate)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
 * Systems with lots of page flags available. This allows separate
 * flags for PageHead() and PageTail() checks of compound pages so that bit
 * tests can be used in performance sensitive paths. PageCompound is
 * generally not used in hot code paths except arch/powerpc/mm/init_64.c
 * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
 * and avoid handling those in real mode.
 */
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
__PAGEFLAG(Tail, tail)

static inline int PageCompound(struct page *page)
{
	return page->flags & ((1L << PG_head) | (1L << PG_tail));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))

#else
/*
 * Reduce page flag use as much as possible by overlapping
 * compound page flags with the flags used for page cache pages. Possible
 * because PageCompound is always set for compound pages and not for
 * pages on the LRU and/or pagecache.
 */
TESTPAGEFLAG(Compound, compound)
__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)

/*
 * PG_reclaim is used in combination with PG_compound to mark the
 * head and tail of a compound page. This saves one page flag
 * but makes it impossible to use compound pages for the page cache.
 * The PG_reclaim bit would have to be used for reclaim or readahead
 * if compound pages enter the page cache.
 *
 * PG_compound & PG_reclaim	=> Tail page
 * PG_compound & ~PG_reclaim	=> Head page
 */
#define PG_head_mask ((1L << PG_compound))
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))

static inline int PageHead(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_mask);
}

static inline int PageTail(struct page *page)
{
	return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
}

static inline void __SetPageTail(struct page *page)
{
	page->flags |= PG_head_tail_mask;
}

static inline void __ClearPageTail(struct page *page)
{
	page->flags &= ~PG_head_tail_mask;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
	clear_bit(PG_compound, &page->flags);
}
#endif

#endif /* !PAGEFLAGS_EXTENDED */

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be called
 * only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

#else

static inline int PageTransHuge(struct page *page)
{
	return 0;
}

static inline int PageTransCompound(struct page *page)
{
	return 0;
}

static inline int PageTransTail(struct page *page)
{
	return 0;
}
#endif

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2, but not too close to -2, so
 * that an underflow of page_mapcount() isn't mistaken for a genuine
 * PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very efficiently by
 * most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}
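
/*
 * Illustrative sketch (an added example, not part of the original header):
 * free pages are not mapped anywhere, so their _mapcount (normally -1 for
 * an unmapped page) can be borrowed as a type marker.  Conceptually, in
 * the buddy allocator:
 *
 *	__SetPageBuddy(page);		// page enters a free list
 *	...
 *	if (PageBuddy(page))		// cheap "is this block free?" test
 *		...			// e.g. try to merge with its buddy
 *	...
 *	__ClearPageBuddy(page);		// page leaves the allocator
 *
 * PageBalloon() below plays the same trick with a different sentinel.
 */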

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
#else
#define __PG_COMPOUND_LOCK	0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru	 | 1 << PG_locked    | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED | \
	 __PG_COMPOUND_LOCK)
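
/*
 * Illustrative sketch (an added example, not part of the original header):
 * the page allocator's free path conceptually does
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);		// report the bug, refuse the page
 *
 * (see free_pages_check() in mm/page_alloc.c for the real version)
 */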

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
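
/*
 * Illustrative sketch (an added example, not part of the original header):
 * reclaim and truncation conceptually use page_has_private() like this
 * before dropping a pagecache page:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		return 0;	// the fs refused; leave the page alone
 */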

#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */