/*
 * include/linux/mm_inline.h
 * (from mirror_ubuntu-artful-kernel.git, via git.proxmox.com git mirror)
 */
1#ifndef LINUX_MM_INLINE_H
2#define LINUX_MM_INLINE_H
3
2c888cfb 4#include <linux/huge_mm.h>
6e543d57 5#include <linux/swap.h>
2c888cfb 6
b2e18538
RR
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	/* Swap-backed pages (anon, tmpfs, shmem) are not file cache. */
	return PageSwapBacked(page) ? 0 : 1;
}
24
/*
 * __update_lru_size - adjust the LRU page counters for @lruvec.
 * @lruvec:   the lruvec whose statistics are being updated
 * @lru:      which LRU list the pages belong to
 * @nr_pages: signed delta; negative when pages are being removed
 * @zid:      zone index (within the lruvec's node) of the pages
 *
 * Updates both the node-wide (NR_LRU_BASE) and the per-zone
 * (NR_ZONE_LRU_BASE) counters for the given LRU list.
 *
 * NOTE(review): the __mod_* accessors are the non-irq-safe stat
 * variants; presumably callers serialize via the LRU lock — confirm
 * against callers before relying on this.
 */
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	/* Node-level counter, then the zone-level counter for @zid. */
	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
35
/*
 * update_lru_size - adjust LRU counters, including memcg accounting.
 * @lruvec:   the lruvec whose statistics are being updated
 * @lru:      which LRU list the pages belong to
 * @zid:      zone index (within the lruvec's node) of the pages
 * @nr_pages: signed delta; negative when pages are being removed
 *
 * Wrapper around __update_lru_size() that additionally updates the
 * memory-cgroup per-lruvec size when CONFIG_MEMCG is enabled.
 */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
45
/*
 * add_page_to_lru_list - link @page at the head of an LRU list.
 * @page:   page being added (may be a compound page)
 * @lruvec: lruvec owning the target list
 * @lru:    index of the target list within @lruvec
 *
 * Accounting is updated first — hpage_nr_pages() yields the number of
 * base pages so compound pages are counted correctly — then the page
 * is linked at the head of lruvec->lists[lru].
 */
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}
52
/*
 * add_page_to_lru_list_tail - link @page at the tail of an LRU list.
 * @page:   page being added (may be a compound page)
 * @lruvec: lruvec owning the target list
 * @lru:    index of the target list within @lruvec
 *
 * Same as add_page_to_lru_list() but inserts at the tail of the list
 * (list_add_tail) instead of the head.
 */
static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}
59
/*
 * del_page_from_lru_list - unlink @page from an LRU list.
 * @page:   page being removed (may be a compound page)
 * @lruvec: lruvec owning the list the page is on
 * @lru:    index of the list the page is currently on
 *
 * The page is unlinked first, then the counters are decremented by the
 * (possibly compound) page's base-page count — the mirror of the
 * update-then-link order used by add_page_to_lru_list().
 */
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
66
401a8e1c
JW
67/**
68 * page_lru_base_type - which LRU list type should a page be on?
69 * @page: the page to test
70 *
71 * Used for LRU list index arithmetic.
72 *
73 * Returns the base LRU type - file or anon - @page should be on.
74 */
75static inline enum lru_list page_lru_base_type(struct page *page)
76{
77 if (page_is_file_cache(page))
78 return LRU_INACTIVE_FILE;
79 return LRU_INACTIVE_ANON;
80}
81
1c1c53d4
HD
82/**
83 * page_off_lru - which LRU list was page on? clearing its lru flags.
84 * @page: the page to test
85 *
86 * Returns the LRU list a page was on, as an index into the array of LRU
87 * lists; and clears its Unevictable or Active flags, ready for freeing.
88 */
014483bc 89static __always_inline enum lru_list page_off_lru(struct page *page)
1da177e4 90{
4111304d 91 enum lru_list lru;
b69408e8 92
894bc310
LS
93 if (PageUnevictable(page)) {
94 __ClearPageUnevictable(page);
4111304d 95 lru = LRU_UNEVICTABLE;
894bc310 96 } else {
4111304d 97 lru = page_lru_base_type(page);
894bc310
LS
98 if (PageActive(page)) {
99 __ClearPageActive(page);
4111304d 100 lru += LRU_ACTIVE;
894bc310 101 }
1da177e4 102 }
1c1c53d4 103 return lru;
1da177e4 104}
21eac81f 105
b69408e8
CL
106/**
107 * page_lru - which LRU list should a page be on?
108 * @page: the page to test
109 *
110 * Returns the LRU list a page should be on, as an index
111 * into the array of LRU lists.
112 */
014483bc 113static __always_inline enum lru_list page_lru(struct page *page)
b69408e8 114{
401a8e1c 115 enum lru_list lru;
b69408e8 116
894bc310
LS
117 if (PageUnevictable(page))
118 lru = LRU_UNEVICTABLE;
119 else {
401a8e1c 120 lru = page_lru_base_type(page);
894bc310
LS
121 if (PageActive(page))
122 lru += LRU_ACTIVE;
894bc310 123 }
b69408e8
CL
124 return lru;
125}
b2e18538 126
/*
 * lru_to_page - page at the tail of an LRU list.
 * Uses ->prev of the list head, i.e. the last (oldest-linked) entry.
 */
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * arch_unmap_kpfn - architecture hook taking a kernel pfn.
 * Architectures that define the arch_unmap_kpfn macro supply a real
 * implementation; everywhere else this compiles to a no-op.
 */
#ifdef arch_unmap_kpfn
extern void arch_unmap_kpfn(unsigned long pfn);
#else
static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
#endif
134
b2e18538 135#endif