#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			  struct vm_area_struct *new_vma,
			  unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
		       unsigned long pfn, bool write);
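
/*
 * Illustrative sketch, not part of the original header: a DAX-style
 * huge-fault handler might call vmf_insert_pfn_pmd() roughly as below.
 * "my_get_pfn" is a hypothetical helper, not a kernel API.
 *
 *	int my_huge_fault(struct vm_area_struct *vma, unsigned long addr,
 *			  pmd_t *pmd, unsigned int flags)
 *	{
 *		unsigned long pfn = my_get_pfn(vma, addr & HPAGE_PMD_MASK);
 *
 *		return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
 *					  flags & FAULT_FLAG_WRITE);
 *	}
 */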

/*
 * Bit indices into transparent_hugepage_flags; toggled at runtime via
 * /sys/kernel/mm/transparent_hugepage.
 */
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
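
/*
 * Worked example (illustrative, assuming x86-64 defaults with 4K base
 * pages): PMD_SHIFT == 21, so HPAGE_PMD_SIZE is 2MB, HPAGE_PMD_ORDER
 * is 21 - 12 = 9, and HPAGE_PMD_NR is 1 << 9 == 512 base pages per
 * huge page.
 */
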
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

/*
 * True when THP is enabled system-wide, or for this vma via
 * MADV_HUGEPAGE, and the vma has not opted out (VM_NOHUGEPAGE,
 * temporary stack).
 */
#define transparent_hugepage_enabled(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
	  (transparent_hugepage_flags &				\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&		\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&		\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&		\
	 !is_vma_temporary_stack(__vma))
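
/*
 * Usage sketch (illustrative, not from the original header): the
 * anonymous fault path gates THP setup on this predicate, roughly:
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		ret = do_huge_pmd_anonymous_page(mm, vma, address,
 *						 pmd, flags);
 */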
#define transparent_hugepage_defrag(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||		\
	 (transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&	\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		if (pmd_trans_huge(*____pmd))			\
			__split_huge_pmd(__vma, __pmd, __address); \
	} while (0)
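
/*
 * Usage sketch (illustrative): code that must operate on individual
 * ptes first demotes any huge pmd covering the address, e.g.
 *
 *	pmd = pmd_offset(pud, address);
 *	split_huge_pmd(vma, pmd, address);
 *	pte = pte_offset_map(pmd, address);
 */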
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return false;
}
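
/*
 * Typical calling pattern (illustrative): with mmap_sem held, take the
 * pmd lock only when the pmd is actually huge, and drop it when done:
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *		... *pmd is a stable huge pmd here ...
 *		spin_unlock(ptl);
 *	}
 */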
/* Number of base pages backing @page: HPAGE_PMD_NR for THP, else 1. */
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
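
/*
 * Illustrative sketch: a page-table walker can use is_huge_zero_pmd()
 * to recognize the shared read-only zero hugepage and skip it, e.g.
 *
 *	if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd))
 *		return 0;	(nothing to copy or account)
 */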
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */