/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()				\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
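
/*
 * For orientation (a summary, not code from this file): the sysfs knob
 * /sys/kernel/mm/transparent_hugepage/enabled maps onto these bits.
 * "always" sets TRANSPARENT_HUGEPAGE_FLAG, "madvise" sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, and "never" clears both, so
 * hugepage_flags_enabled() is true unless the knob is set to "never".
 */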

/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
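
/*
 * Worked example (assuming 4K pages and 2M PMDs, so HPAGE_PMD_NR == 512):
 * a file VMA with vm_start == 0x200000 and vm_pgoff == 16 gives
 * (vm_start >> PAGE_SHIFT) - vm_pgoff == 512 - 16 == 496, which is not a
 * multiple of 512, so PMD-aligned addresses in the VMA do not map to
 * PMD-aligned file offsets and transhuge_vma_suitable() returns false.
 */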

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
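
/*
 * A usage sketch (hedged): callers are expected to hold the page lock and
 * a reference; a return of 0 means the compound page was split into
 * order-0 pages.
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 */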
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
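
/*
 * A usage sketch (hedged): code about to work on individual ptes splits
 * any huge pmd covering the range first, e.g.
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 * After the split the pmd points to a normal page table, so pte-level
 * walks behave as usual.
 */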

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
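
/*
 * Typical calling pattern (a sketch): a non-NULL return means the pmd is
 * huge (or a swap/devmap entry) and its ptl is now held.
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		...operate on the huge pmd...
 *		spin_unlock(ptl);
 *	}
 */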
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
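
/*
 * Lifetime note (a summary, hedged): the huge zero page is a single
 * shared, read-only page of zeroes.  mm_get_huge_zero_page() takes a
 * per-mm reference so read faults can map it instead of allocating;
 * mm_put_huge_zero_page() drops that reference at mm teardown, after
 * which the page may be reclaimed by its shrinker under memory pressure.
 */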

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	return &folio->_deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}
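
/*
 * A usage sketch (hedged), mirroring split_huge_page() above: the folio
 * must be locked and held across the call.
 *
 *	folio_lock(folio);
 *	err = split_folio(folio);
 *	folio_unlock(folio);
 */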

/*
 * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
 * limitations in the implementation like arm64 MTE can override this to
 * fall back to split_huge_page().
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif

#endif /* _LINUX_HUGE_MM_H */