#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
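
/*
 * Fault-path entry points for anonymous transparent huge pages: creation,
 * fork-time copying, access/dirty bookkeeping and write-protect faults are
 * handled at PMD (and optionally PUD) granularity rather than per base page.
 */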
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
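
/*
 * Bit numbers within the global transparent_hugepage_flags bitmap; the
 * enabled, defrag and zero-page policies are selected by setting or
 * clearing these bits (normally via /sys/kernel/mm/transparent_hugepage).
 */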
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
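
/* Order and number of base pages that make up one PMD-sized huge page. */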
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
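
/*
 * A VMA is eligible for THP when THP is enabled system-wide, or when the
 * "madvise" policy is in effect and the VMA is marked VM_HUGEPAGE; it is
 * never eligible when marked VM_NOHUGEPAGE or when it is a temporary stack.
 */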
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
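
/*
 * Splitting tears a compound THP back into individual base pages.
 * deferred_split_huge_page() queues a partially unmapped THP so the split
 * can be done later, under memory pressure, by the shrinker.
 */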
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);
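
/*
 * Split the PMD only when it actually maps a transparent huge page or a
 * devmap entry; an ordinary page-table PMD is left alone.
 */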
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);
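
/*
 * Take the page-table lock covering a huge PMD/PUD.  On success the ptl is
 * returned locked and the caller must drop it; NULL means the entry is not
 * (or is no longer) huge and no lock was taken.  Typical use, as a sketch:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */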
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
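
/*
 * The huge zero page is a single, shared, read-only PMD-sized page of
 * zeroes used to service read faults when the zero-page policy is enabled.
 */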
static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
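
/* mk_huge_pmd(): build a huge PMD entry mapping @page with protection @prot. */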
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
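
/*
 * Without CONFIG_TRANSPARENT_HUGEPAGE the helpers below collapse to no-op
 * stubs, and the HPAGE_* constants above trigger BUILD_BUG() if ever used.
 */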
#define hpage_nr_pages(x)		1

#define transparent_hugepage_enabled(__vma)	0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */