b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
ba76149f AA |
2 | #ifndef _LINUX_KHUGEPAGED_H |
3 | #define _LINUX_KHUGEPAGED_H | |
4 | ||
f7ccbae4 IM |
5 | #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ |
6 | ||
ba76149f AA |
7 | |
8 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
b46e756f KS |
9 | extern struct attribute_group khugepaged_attr_group; |
10 | ||
11 | extern int khugepaged_init(void); | |
12 | extern void khugepaged_destroy(void); | |
13 | extern int start_stop_khugepaged(void); | |
ba76149f AA |
14 | extern int __khugepaged_enter(struct mm_struct *mm); |
15 | extern void __khugepaged_exit(struct mm_struct *mm); | |
6d50e60c DR |
16 | extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
17 | unsigned long vm_flags); | |
e2c01fd0 | 18 | extern void khugepaged_min_free_kbytes_update(void); |
27e1f827 SL |
19 | #ifdef CONFIG_SHMEM |
20 | extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); | |
21 | #else | |
22 | static inline void collapse_pte_mapped_thp(struct mm_struct *mm, | |
23 | unsigned long addr) | |
24 | { | |
25 | } | |
26 | #endif | |

/*
 * Helpers over transparent_hugepage_flags: whether khugepaged should run
 * at all, for every VMA or only madvise(MADV_HUGEPAGE) ones, and whether
 * it should defrag (direct reclaim/compaction) when collapsing.
 */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					       \
	(transparent_hugepage_flags &				       \
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

42 | static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
43 | { | |
44 | if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) | |
45 | return __khugepaged_enter(mm); | |
46 | return 0; | |
47 | } | |
48 | ||
49 | static inline void khugepaged_exit(struct mm_struct *mm) | |
50 | { | |
51 | if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) | |
52 | __khugepaged_exit(mm); | |
53 | } | |
54 | ||
6d50e60c DR |
55 | static inline int khugepaged_enter(struct vm_area_struct *vma, |
56 | unsigned long vm_flags) | |
ba76149f AA |
57 | { |
58 | if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) | |
a664b2d8 | 59 | if ((khugepaged_always() || |
6d50e60c | 60 | (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && |
18600332 MH |
61 | !(vm_flags & VM_NOHUGEPAGE) && |
62 | !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) | |
ba76149f AA |
63 | if (__khugepaged_enter(vma->vm_mm)) |
64 | return -ENOMEM; | |
65 | return 0; | |
66 | } | |
67 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
68 | static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
69 | { | |
70 | return 0; | |
71 | } | |
72 | static inline void khugepaged_exit(struct mm_struct *mm) | |
73 | { | |
74 | } | |
6d50e60c DR |
75 | static inline int khugepaged_enter(struct vm_area_struct *vma, |
76 | unsigned long vm_flags) | |
ba76149f AA |
77 | { |
78 | return 0; | |
79 | } | |
6d50e60c DR |
80 | static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
81 | unsigned long vm_flags) | |
ba76149f AA |
82 | { |
83 | return 0; | |
84 | } | |
27e1f827 SL |
85 | static inline void collapse_pte_mapped_thp(struct mm_struct *mm, |
86 | unsigned long addr) | |
87 | { | |
88 | } | |
e2c01fd0 VB |
89 | |
90 | static inline void khugepaged_min_free_kbytes_update(void) | |
91 | { | |
92 | } | |
ba76149f AA |
93 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
94 | ||
95 | #endif /* _LINUX_KHUGEPAGED_H */ |