#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

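/*
 * Architectures advertise PUD-sized transparent hugepage support via
 * CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD; the stub in the #else
 * branch below keeps callers free of #ifdefs where it is absent.
 */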
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
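/*
 * vmf_insert_pfn_pmd()/vmf_insert_pfn_pud() install huge pfn mappings
 * from a driver fault handler (DAX-style users). A minimal sketch,
 * assuming the driver has already resolved @pfn for the faulting
 * address:
 *
 *	if (IS_ALIGNED(vmf->address, PMD_SIZE))
 *		return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 *					  pfn, vmf->flags & FAULT_FLAG_WRITE);
 */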
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

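/*
 * The enum above names bit positions in transparent_hugepage_flags.
 * TRANSPARENT_HUGEPAGE_FLAG and TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
 * correspond to the "always" and "madvise" settings of
 * /sys/kernel/mm/transparent_hugepage/enabled, and the DEFRAG_* flags
 * mirror the choices offered by the "defrag" file next to it.
 */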
struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

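/*
 * Worked example, assuming x86_64 with 4KiB base pages and 2MiB PMD
 * mappings: HPAGE_PMD_SHIFT = 21, so HPAGE_PMD_ORDER = 21 - 12 = 9 and
 * HPAGE_PMD_NR = 1 << 9 = 512 base pages per PMD-sized huge page.
 */
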
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

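/*
 * A VMA is eligible for THP when the feature is enabled system-wide
 * ("always"), or when it is enabled for madvise(MADV_HUGEPAGE) regions
 * and the VMA carries VM_HUGEPAGE. It is never eligible if VM_NOHUGEPAGE
 * is set, if the process opted out via PR_SET_THP_DISABLE (the
 * MMF_DISABLE_THP bit), or for a temporary stack during exec.
 */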
#define transparent_hugepage_enabled(__vma) \
	((transparent_hugepage_flags & \
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) || \
	  (transparent_hugepage_flags & \
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
	   ((__vma)->vm_flags & VM_HUGEPAGE))) && \
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
	 !test_bit(MMF_DISABLE_THP, &(__vma)->vm_mm->flags) && \
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page() \
	(transparent_hugepage_flags & \
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow() \
	(transparent_hugepage_flags & \
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

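/*
 * thp_get_unmapped_area() is meant to back f_op->get_unmapped_area for
 * files that can be mapped with huge pages, returning addresses aligned
 * to HPAGE_PMD_SIZE so that faults may install PMD-sized mappings.
 */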
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
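/*
 * A minimal usage sketch for split_huge_page(), assuming the caller
 * holds a reference on the compound page; the page must be locked,
 * 0 is returned on success and -EBUSY when extra pins prevent the
 * split:
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 */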
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address) \
	do { \
		pmd_t *____pmd = (__pmd); \
		if (pmd_trans_huge(*____pmd) \
					|| pmd_devmap(*____pmd)) \
			__split_huge_pmd(__vma, __pmd, __address, \
						false, NULL); \
	} while (0)

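/*
 * The pmd_trans_huge()/pmd_devmap() test above makes split_huge_pmd()
 * cheap to invoke unconditionally: only huge or devmap entries take the
 * call into __split_huge_pmd().
 */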
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address) \
	do { \
		pud_t *____pud = (__pud); \
		if (pud_trans_huge(*____pud) \
					|| pud_devmap(*____pud)) \
			__split_huge_pud(__vma, __pud, __address); \
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
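/*
 * Illustrative caller pattern (mmap_sem already held, as required):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */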
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
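/*
 * mm_get_huge_zero_page() hands out the shared huge zero page and caches
 * the reference in the mm, so repeated read faults do not hammer the
 * global refcount; mm_put_huge_zero_page() drops that cached reference
 * when the mm goes away.
 */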
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
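/*
 * mk_huge_pmd() composes the generic helpers: mk_pmd() builds a pmd for
 * @page with @prot, and pmd_mkhuge() then marks the entry huge.
 */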

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
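/*
 * The ({ BUILD_BUG(); 0; }) bodies turn any reachable use of these
 * constants in !CONFIG_TRANSPARENT_HUGEPAGE builds into a compile-time
 * error rather than a silent computation with bogus sizes.
 */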

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address) \
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */