/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *  - negative errno on page migration failure;
 *  - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
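
/*
 * Illustrative sketch (not part of this header): a filesystem whose
 * pages carry no private state can wire the generic helper declared
 * below straight into its address_space_operations and inherit the
 * return convention above; "myfs_aops" is a hypothetical name:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */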

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
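
/*
 * Illustrative sketch, modelled on the memory-hotplug offlining path in
 * mm/memory_hotplug.c: migrate a private list of isolated pages, and
 * put them back on failure.
 *
 *	struct migration_target_control mtc = {
 *		.nid = NUMA_NO_NODE,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *
 *	ret = migrate_pages(&source, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG);
 *	if (ret)
 *		putback_movable_pages(&source);
 */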

extern void migrate_prep(void);
extern void migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_prep(void) {}
static inline void migrate_prep_local(void) {}

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
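
/*
 * Illustrative sketch of the non-LRU movable page protocol (see
 * Documentation/vm/page_migration.rst): a driver such as zsmalloc or
 * virtio_balloon gives each page a mapping whose address_space_operations
 * implement ->isolate_page/->migratepage/->putback_page, then marks the
 * page movable while holding the page lock:
 *
 *	lock_page(page);
 *	__SetPageMovable(page, mapping);
 *	unlock_page(page);
 */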

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
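
/*
 * Illustrative sketch, modelled on the NUMA hinting fault path in
 * mm/memory.c (do_numa_page()): a nonzero return means the page was
 * moved to the target node.
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated) {
 *		page_nid = target_nid;
 *		flags |= TNF_MIGRATED;
 *	}
 */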

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for the PAE architecture: its unsigned long is only 32 bits
 * wide, so after shifting a pfn left by MIGRATE_PFN_SHIFT there might not
 * be enough bits to store every physical address plus flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
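
/*
 * Illustrative sketch, modelled on device drivers such as
 * lib/test_hmm.c: encode a freshly allocated destination page into a
 * migrate_vma dst[] entry, and decode an entry back into a page.
 *
 *	migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *			  MIGRATE_PFN_LOCKED;
 *	...
 *	dpage = migrate_pfn_to_page(migrate->dst[i]);
 */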

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after
	 * migrate_vma_setup() returns, and must not change the dst array
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
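
/*
 * Illustrative sketch of the three-phase flow, following
 * Documentation/vm/hmm.rst; "dev" and the copy step are hypothetical:
 *
 *	args.vma = vma;
 *	args.src = src;
 *	args.dst = dst;
 *	args.start = start;
 *	args.end = end;
 *	args.pgmap_owner = dev;
 *	args.flags = MIGRATE_VMA_SELECT_SYSTEM;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	// Allocate destination pages, copy data, and fill args.dst[]
 *	// with migrate_pfn() entries for each migrating src entry.
 *	migrate_vma_pages(&args);
 *	// Update device page tables for pages that migrated, then:
 *	migrate_vma_finalize(&args);
 */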

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */