#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

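/*
 * Huge PMD fault and copy entry points, called from the generic mm
 * fault/fork paths when a PMD-sized transparent huge page is (or can be)
 * mapped at the faulting address.
 */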
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
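/*
 * Global THP policy flags.  They live in transparent_hugepage_flags and
 * are toggled through the sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/.
 */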
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

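/*
 * Geometry of a PMD-mapped huge page.  For example, with 4K base pages
 * and PMD_SHIFT == 21 (x86-64), HPAGE_PMD_ORDER is 9, HPAGE_PMD_NR is 512
 * and HPAGE_PMD_SIZE is 2M.
 */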
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

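/*
 * True when THP may be used for this vma: either THP is enabled
 * system-wide (TRANSPARENT_HUGEPAGE_FLAG), or it is enabled for
 * madvise(MADV_HUGEPAGE) regions and the vma has VM_HUGEPAGE set;
 * the vma must not be marked VM_NOHUGEPAGE or be a temporary stack.
 */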
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

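/*
 * split_huge_page_to_list() splits a THP into base pages; the caller must
 * hold a reference and have the page locked, and tail pages may be
 * collected on the caller-supplied @list.  split_huge_page() is the common
 * case with no private list.  deferred_split_huge_page() queues a partially
 * unmapped THP so the shrinker can split it later under memory pressure.
 */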
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

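/*
 * split_huge_pmd() turns a huge (or devmap) PMD mapping at @__address back
 * into a regular page table.  Only the mapping is split here; the compound
 * page itself stays intact (splitting the page is split_huge_page()).
 */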
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
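/*
 * Returns the page table lock (to be released with spin_unlock()) when
 * *pmd is a huge or devmap entry, NULL otherwise.  An illustrative usage
 * pattern:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd while it cannot change ...
 *		spin_unlock(ptl);
 *	}
 */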
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
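/* Number of base pages backing @page: HPAGE_PMD_NR for a THP, else 1. */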
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

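/*
 * The shared huge zero page, used to service read faults on anonymous
 * THP ranges when use_zero_page is enabled; mm_get_huge_zero_page() /
 * mm_put_huge_zero_page() manage the per-mm reference to it.
 */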
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
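/*
 * With CONFIG_TRANSPARENT_HUGEPAGE disabled the huge page geometry must
 * never be used, so any reference becomes a compile-time error.
 */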
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */