/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                         struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                        unsigned long new_addr, unsigned long old_end,
                        pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
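/*
 * Bit numbers used in transparent_hugepage_flags; most of them are toggled
 * through the "enabled", "defrag" and "use_zero_page" files under
 * /sys/kernel/mm/transparent_hugepage/.
 */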
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

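/*
 * HPAGE_PMD_ORDER is the allocation order of a PMD-sized huge page and
 * HPAGE_PMD_NR the number of base pages in one; on x86-64 with 4KiB base
 * pages, for example, that is order 9, i.e. 512 pages.
 */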
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * to be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (is_vma_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;
        /*
         * For dax vmas, try to always use hugepage mappings. If the kernel does
         * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
         * mappings, and device-dax namespaces, that try to guarantee a given
         * mapping size, will fail to enable
         */
        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

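/*
 * @haddr is the PMD-aligned address being faulted.  The vma is suitable when
 * the whole huge page range fits inside it and, for file-backed vmas, the
 * virtual address and the file offset are congruent modulo HPAGE_PMD_NR
 * pages.
 */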
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                        (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                        return false;
        }

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;
        return true;
}

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()        0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

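/*
 * split_huge_pmd() only calls __split_huge_pmd() when the pmd actually maps
 * something splittable (a swap/migration entry, a transparent huge pmd or a
 * devmap pmd); otherwise it is a no-op.
 */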
#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma);

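/*
 * A pmd that is neither none nor present holds a swap-style entry, e.g. a
 * migration entry for a THP that is in the middle of being migrated.
 */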
static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

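/*
 * The lock helpers below return the page table lock when the entry is huge
 * (or a swap/devmap entry) and NULL otherwise.  Illustrative caller sketch
 * (not a declaration from this header):
 *
 *      ptl = pmd_trans_huge_lock(pmd, vma);
 *      if (ptl) {
 *              ... the huge pmd is stable while ptl is held ...
 *              spin_unlock(ptl);
 *      }
 */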
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
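/*
 * Number of base pages backing @page: HPAGE_PMD_NR for a transparent huge
 * page, 1 for a normal page.
 */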
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

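/*
 * The huge zero page: a single, shared, PMD-sized page of zeroes used to
 * back read faults when TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG is set.
 */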
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

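/*
 * True when the architecture can migrate a THP without splitting it first
 * (CONFIG_ARCH_ENABLE_THP_MIGRATION).
 */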
static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
        /*
         * Global or memcg deferred list in the second tail pages is
         * occupied by compound_head.
         */
        return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
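/*
 * !CONFIG_TRANSPARENT_HUGEPAGE: stub versions of the helpers above so that
 * callers build without #ifdef guards.
 */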
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
                pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */