/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
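
/*
 * A minimal usage sketch: a driver's ->huge_fault() handler might call
 * the helper above roughly like this (device_pfn_for() is hypothetical,
 * not a kernel API):
 *
 *	static vm_fault_t dev_huge_fault(struct vm_fault *vmf,
 *					 enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn = device_pfn_for(vmf);
 *
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */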
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

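/*
 * Bit numbers for transparent_hugepage_flags; each flag below is tested
 * against that bitmap with a (1 << flag) mask.
 */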
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if the hardware/firmware has marked hugepage
	 * support as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

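/*
 * Check whether @haddr can be mapped with a huge pmd in @vma: file-backed
 * vmas must be naturally aligned (vm_start and vm_pgoff must agree modulo
 * HPAGE_PMD_NR), and the whole PMD-sized range must lie within the vma.
 */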
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

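/*
 * Split the pmd at @__address if it maps a huge page (transparent,
 * devmap or a swap/migration entry); a regular pmd is left untouched.
 */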
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

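/* A pmd that is neither none nor present holds a swap or migration entry. */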
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/*
 * mmap_lock must be held on entry.  Returns the page table lock, held,
 * when the pmd is huge (the caller must then unlock it), or NULL when
 * the pmd is a regular one.
 */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
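
/*
 * A minimal sketch of the calling pattern (mmap_lock already held):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */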
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

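/*
 * The global huge zero page, allocated on first use; mm_get_huge_zero_page()
 * and mm_put_huge_zero_page() below manage the per-mm reference on it.
 */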
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

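/* Construct a pmd that maps @page as a huge page with protections @prot. */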
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

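/* True when the architecture can migrate a THP without first splitting it. */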
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * Global or memcg deferred list in the second tail pages is
	 * occupied by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
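/* Any use of the HPAGE_* constants without THP support is a build-time error. */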
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

#endif /* _LINUX_HUGE_MM_H */