#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

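/*
 * Huge-pmd fault, copy and teardown entry points; all of these are
 * implemented in mm/huge_memory.c.
 */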
extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                          unsigned long new_addr, unsigned long old_end,
                          pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
                        pfn_t pfn, bool write);
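
/*
 * Bit numbers in transparent_hugepage_flags.  They control global THP
 * policy and are toggled through the sysfs files under
 * /sys/kernel/mm/transparent_hugepage/.
 */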
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count,
                                          enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                         struct kobj_attribute *attr, char *buf,
                                         enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
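/*
 * Worked example: with 4K base pages and 2M PMD-level huge pages (the
 * x86-64 defaults), HPAGE_PMD_ORDER = 21 - 12 = 9, so each huge page
 * covers HPAGE_PMD_NR = 512 base pages.
 */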

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

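/*
 * THP is enabled for a VMA when it is enabled system-wide, or when it
 * is enabled for madvise()d regions and the VMA is marked VM_HUGEPAGE.
 * It is never enabled for a VM_NOHUGEPAGE VMA or for a temporary
 * execve() stack.
 */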
#define transparent_hugepage_enabled(__vma)                            \
        ((transparent_hugepage_flags &                                 \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) ||                            \
          (transparent_hugepage_flags &                                \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&                  \
           ((__vma)->vm_flags & VM_HUGEPAGE))) &&                      \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) &&                       \
         !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()                           \
        (transparent_hugepage_flags &                                  \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                               \
        (transparent_hugepage_flags &                                  \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

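/*
 * Split a THP into its base pages.  split_huge_page_to_list() moves the
 * tail pages to @list, or to the LRU when @list is NULL, and returns 0
 * on success or nonzero if the page could not be split (for example
 * because of extra pins).  The caller must hold a reference on, and
 * have locked, @page.
 */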
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

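/*
 * split_huge_pmd() splits *__pmd in place when it maps a transparent
 * huge page or a devmap huge page, and is a no-op otherwise.
 */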
#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                  unsigned long start,
                                  unsigned long end,
                                  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
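/*
 * Typical caller pattern (a sketch, not taken from this file):
 *
 *      ptl = pmd_trans_huge_lock(pmd, vma);
 *      if (ptl) {
 *              ... pmd is huge (or devmap) and stable here ...
 *              spin_unlock(ptl);
 *      }
 *
 * A NULL return means the pmd is not (or is no longer) huge and no
 * lock is held.
 */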
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}

extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

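/*
 * A single zero-filled huge page is shared by all users of the huge
 * zero pmd.  get_huge_zero_page() returns it with a reference held;
 * put_huge_zero_page() drops that reference.
 */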
struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);

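/* Build a huge (PMD-sized) page table entry for @page with protection @prot. */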
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
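/*
 * With THP compiled out these constants have no meaningful value;
 * BUILD_BUG() turns any accidental use into a compile-time error.
 */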
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline void put_huge_zero_page(void)
{
        BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd, int flags)
{
        return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */