#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			unsigned long pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     spinlock_t **ptl);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
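/*
 * Illustrative sketch, not part of this header: the anonymous fault path
 * gates huge allocations on this check before falling back to normal
 * PTEs, roughly
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		return do_huge_pmd_anonymous_page(mm, vma, address,
 *						  pmd, flags);
 */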
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

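/*
 * Transitional stubs from the "remove infrastructure for handling
 * splitting PMDs" rework: the old split_huge_page() machinery is being
 * replaced, so any caller left behind fails loudly at compile time
 * instead of misbehaving at runtime.
 */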
#define split_huge_page_to_list(page, list) BUILD_BUG()
#define split_huge_page(page) BUILD_BUG()
#define split_huge_pmd(__vma, __pmd, __address) BUILD_BUG()

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return false;
}
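/*
 * Usage sketch (caller-side code, assumed rather than defined here):
 * operate on the huge pmd only while the returned lock is held, then
 * drop it:
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *		... *pmd is known huge and stable here ...
 *		spin_unlock(ptl);
 *	}
 */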
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
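/*
 * Illustrative only: statistics and rmap code use this so one THP is
 * accounted as HPAGE_PMD_NR base pages, e.g. something like
 *
 *	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 *			      hpage_nr_pages(page));
 */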

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
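/*
 * Illustrative sketch: pmd walkers can recognize the shared, read-only
 * huge zero page and skip it cheaply, roughly
 *
 *	if (is_huge_zero_pmd(*pmd))
 *		return 0;	... nothing private to copy or account ...
 */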

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */