#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

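/*
 * Entry points used by the generic memory-management code to create,
 * copy and service faults on PMD- and PUD-sized transparent huge page
 * mappings.
 */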
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
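
/*
 * Bit numbers in transparent_hugepage_flags; they select the global
 * "enabled" and "defrag" policies exposed under
 * /sys/kernel/mm/transparent_hugepage/.
 */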
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

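/* sysfs helpers for attributes that expose a single flag bit above. */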
struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count,
				enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

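/* Order of a PMD-sized huge page and the number of base pages it spans. */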
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

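/*
 * True when this VMA may use transparent huge pages: THP is enabled
 * globally, or enabled for madvise(MADV_HUGEPAGE) regions and this VMA
 * has VM_HUGEPAGE set, and the VMA is neither marked VM_NOHUGEPAGE nor
 * a temporary stack.
 */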
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

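/*
 * Split (or unmap) a huge PMD mapping so the range is handled at the PTE
 * level again; does nothing unless *pmd is a transparent huge or devmap PMD.
 */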
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
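/*
 * Illustrative calling pattern (a sketch, not taken from this header):
 * with mmap_sem held, a caller checks for a huge PMD and, if one is
 * mapped, operates on it under the returned page table lock:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... handle the huge pmd ...
 *		spin_unlock(ptl);
 *		return;
 *	}
 *	... fall back to the pte level ...
 */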
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
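
/* Number of base pages covered by @page: HPAGE_PMD_NR for THP, else 1. */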
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

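/*
 * The shared, read-only huge zero page and the per-mm reference that
 * keeps it alive while an mm maps it.
 */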
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

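/* Build a huge (PMD-mapped) entry for @page with protection @prot. */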
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
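/*
 * Stubs for !CONFIG_TRANSPARENT_HUGEPAGE builds, so callers can compile
 * the THP paths away without needing #ifdefs of their own.
 */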
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */