/*
 * include/linux/huge_mm.h
 * (as of "x86, thp: remove infrastructure for handling splitting PMDs")
 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			unsigned long pfn, bool write);
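/*
 * The declarations above are the huge-PMD entry points called from the
 * generic memory-management code (mm/memory.c, mm/mremap.c, mm/mprotect.c,
 * ...).  As a rough, simplified sketch of the anonymous fault dispatch
 * (for illustration only; the real call sites live in mm/memory.c):
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
 */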
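/*
 * Bits in transparent_hugepage_flags, normally controlled through
 * /sys/kernel/mm/transparent_hugepage/{enabled,defrag,use_zero_page}.
 */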
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
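/*
 * page_check_address_pmd() looks up the pmd that maps @page in @mm at
 * @address.  On success the pmd is returned with the page table lock held
 * in *@ptl; the flag argument selects whether a pmd currently under
 * splitting is acceptable.  See mm/huge_memory.c for the exact semantics.
 */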
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag,
				     spinlock_t **ptl);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
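/*
 * Worked example: on x86-64 with 4 KiB base pages, PAGE_SHIFT == 12 and
 * PMD_SHIFT == 21, so HPAGE_PMD_SIZE == 2 MiB, HPAGE_PMD_ORDER == 9 and
 * HPAGE_PMD_NR == 512 base pages per huge page.
 */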
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
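/*
 * Policy helpers, evaluated per-VMA.  Roughly: THP is used when it is
 * enabled system-wide ("always"), or when it is enabled for madvise()d
 * regions ("madvise") and the VMA carries VM_HUGEPAGE; VMAs marked
 * VM_NOHUGEPAGE and temporary stack VMAs are always excluded.
 */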
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
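/*
 * PMD splitting is being reworked in this series: the old split_huge_page*()
 * and wait_split_huge_page() entry points no longer have callers, and the
 * BUILD_BUG() stubs below turn any remaining use into a compile-time error
 * until the new implementation is introduced.
 */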
#define split_huge_page_to_list(page, list) BUILD_BUG()
#define split_huge_page(page) BUILD_BUG()
#define split_huge_pmd(__vma, __pmd, __address) BUILD_BUG()

#define wait_split_huge_page(__anon_vma, __pmd) BUILD_BUG()
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				 spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return 0;
}
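/*
 * Rough usage sketch (for illustration; the callers in mm/ are
 * authoritative), assuming a local "spinlock_t *ptl":
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 *		... the huge pmd is stable, operate on it ...
 *		spin_unlock(ptl);
 *	}
 *
 * A return value of 1 means *ptl is held and must be released by the
 * caller; otherwise no lock is held on return.
 */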
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
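/*
 * huge_zero_page is a single, global, read-only huge page of zeroes.  When
 * transparent_hugepage_use_zero_page() is true it is mapped on read faults
 * of anonymous THP regions instead of allocating a real huge page;
 * get_huge_zero_page() returns it with a reference held.
 */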
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
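/*
 * With CONFIG_TRANSPARENT_HUGEPAGE disabled, any use of HPAGE_PMD_* is a
 * compile-time error, and the remaining helpers below collapse to cheap
 * no-ops so that common code can call them without #ifdefs.
 */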
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	return 0;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */