/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
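/*
 * vmf_insert_pfn_pmd()/vmf_insert_pfn_pud() install a huge pfn-backed
 * mapping at fault time; DAX is the main in-tree user.  A sketch of the
 * call shape from a huge-fault handler (the handler and helper below
 * are hypothetical, shown only for orientation):
 *
 *	static int my_huge_fault(struct vm_fault *vmf)
 *	{
 *		pfn_t pfn = lookup_backing_pfn(vmf);	// hypothetical
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */
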
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
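/*
 * The enumerators above are bit numbers in transparent_hugepage_flags,
 * not masks, so tests shift first; e.g. the system-wide "always" policy
 * check used by transparent_hugepage_enabled() below is:
 *
 *	transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)
 *
 * The bits are flipped at runtime via the sysfs files under
 * /sys/kernel/mm/transparent_hugepage/.
 */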

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count,
				enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
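/*
 * Worked example: on x86-64 with 4 KiB base pages, PAGE_SHIFT is 12 and
 * PMD_SHIFT is 21, so HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512: a
 * PMD huge page is 512 contiguous base pages, i.e. 2 MiB.  These two
 * macros can live outside the #ifdef below because macro expansion is
 * lazy; they only break the build if used where HPAGE_PMD_SHIFT is the
 * BUILD_BUG() stub.
 */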

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
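/*
 * Likewise on x86-64, PUD_SHIFT is 30, so HPAGE_PUD_SIZE is 1 GiB.  The
 * MASK variants round down to the start of the huge page, e.g.
 * "addr & HPAGE_PMD_MASK" yields the 2 MiB-aligned base of addr.
 */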

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}
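/*
 * Policy order above: per-VMA and per-mm opt-outs (VM_NOHUGEPAGE,
 * temporary stacks during exec, MMF_DISABLE_THP) always win; "always"
 * mode and DAX VMAs then enable THP unconditionally; in "madvise" mode
 * only VMAs marked by madvise(MADV_HUGEPAGE) (VM_HUGEPAGE) qualify.
 */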

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
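/*
 * split_huge_page_to_list() returns 0 on success and an error (e.g.
 * -EBUSY when the compound page holds unexpected extra references) when
 * the THP cannot be split right now; split_huge_page() is the common
 * caller with no private list.  deferred_split_huge_page() instead
 * queues the page so the shrinker can split it later, under pressure.
 */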

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
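/*
 * The wrapper macro peeks at the pmd so callers pay nothing for the
 * common case of a normal page table; only swap (migration), huge or
 * devmap pmds reach __split_huge_pmd(), which revalidates the entry
 * under the page table lock before actually splitting.
 */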

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}
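/*
 * A pmd that is neither none nor present holds a software swap entry;
 * at pmd level that is, in practice, a THP migration entry (see
 * CONFIG_ARCH_ENABLE_THP_MIGRATION), since huge pages are split and
 * swapped out at pte granularity.
 */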

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
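/*
 * Both helpers return the page table lock, held, if the entry is still
 * huge, or NULL after losing a race with a split.  Typical caller
 * pattern:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// *pmd is a huge/devmap/swap entry, stable while locked
 *		...
 *		spin_unlock(ptl);
 *	} else {
 *		// fall back to the pte-level path
 *	}
 */
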
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
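/*
 * hpage_nr_pages() lets accounting code scale uniformly: it reports
 * HPAGE_PMD_NR (512 on x86-64 with 4 KiB pages) for a THP and 1 for a
 * normal page, so rmap and LRU counters can simply be adjusted by the
 * returned count regardless of page size.
 */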

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
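/*
 * There is a single system-wide huge zero page, shared read-only by
 * every mm that takes a read fault before any write;
 * mm_get_huge_zero_page()/mm_put_huge_zero_page() refcount it per mm so
 * it can be freed under memory pressure.  No PUD-sized equivalent
 * exists, hence is_huge_zero_pud() is constant false.
 * thp_migration_supported() folds to a compile-time constant through
 * IS_ENABLED(), letting unsupported migration paths drop out entirely.
 */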

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
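/*
 * With THP compiled out, any reachable use of the HPAGE_* constants is
 * a bug, so they expand to BUILD_BUG() statement expressions; code that
 * references them only behind an IS_ENABLED()-style check (such as
 * thp_migration_supported()) is eliminated before the error can fire.
 */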

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}
#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */