/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (thp_migration_supported() && PageTransHuge(page)) {
		order = HPAGE_PMD_ORDER;
		gfp_mask |= GFP_TRANSHUGE;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
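
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * new_page_t callback built on new_page_nodemask() above, handed to
 * migrate_pages() together with a list of isolated pages. example_new_page,
 * pagelist and nid are placeholder names used only for this example.
 *
 *	static struct page *example_new_page(struct page *page,
 *					     unsigned long private, int **reason)
 *	{
 *		return new_page_nodemask(page, (int)private,
 *					 &node_states[N_MEMORY]);
 *	}
 *
 *	// pages were previously isolated onto pagelist; nid is the target node
 *	err = migrate_pages(&pagelist, example_new_page, NULL,
 *			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */
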
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

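/*
 * Illustrative sketch, not part of the original header: how a caller of the
 * migrate_vma() machinery below might encode a destination page and decode a
 * source entry with the helpers above. The src_pfns/dst_pfns arrays, index i
 * and newpage are hypothetical names used only for this example.
 *
 *	// Encode a freshly allocated, locked destination page at index i;
 *	// migrate_pfn() already sets MIGRATE_PFN_VALID.
 *	dst_pfns[i] = migrate_pfn(page_to_pfn(newpage)) | MIGRATE_PFN_LOCKED;
 *
 *	// Decode a source entry back to its struct page (NULL if not valid)
 *	// and test whether it is a migration candidate.
 *	struct page *spage = migrate_pfn_to_page(src_pfns[i]);
 *	bool can_migrate = src_pfns[i] & MIGRATE_PFN_MIGRATE;
 */
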
/*
 * struct migrate_vma_ops - migrate operation callbacks
 *
 * @alloc_and_copy: allocate destination memory and copy source memory to it
 * @finalize_and_map: allow the caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been locked,
 * unmapped and checked (checked whether pinned or not). All pages that can be
 * migrated will have an entry in the src array set with the pfn value of the
 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set (other
 * flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (i.e. with the MIGRATE_PFN_VALID
 * and MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
 * callback must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with the MIGRATE_PFN_VALID and
 * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
 * locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory (i.e. the MIGRATE_PFN_DEVICE
 * flag is also set in the src array entry). If the device driver cannot
 * migrate a device page back to system memory, then it must set the
 * corresponding dst array entry to MIGRATE_PFN_ERROR. This will trigger a
 * SIGBUS if the CPU tries to access any of the virtual addresses originally
 * backed by this page. Because a SIGBUS is such a severe result for the
 * userspace process, the device driver should avoid setting MIGRATE_PFN_ERROR
 * unless it is really in an unrecoverable state.
 *
 * For empty entries in the CPU page table (pte_none() or pmd_none() is true)
 * we do set the MIGRATE_PFN_MIGRATE flag in the corresponding source array
 * entry, thus allowing the device driver to allocate device memory for those
 * unbacked virtual addresses. For this the device driver simply has to
 * allocate device memory and properly set the destination entry, just as for
 * regular migration. Note that this can still fail, and thus the device driver
 * must check, inside the finalize_and_map() callback, whether migration was
 * successful for those entries, just like for regular migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN!
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for the
 * memory allocated by the alloc_and_copy() callback). Migration can fail, and
 * thus the finalize_and_map() callback allows the driver to inspect which
 * pages were successfully migrated, and which were not. Successfully migrated
 * pages will have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) then it returns. At this
 * point, the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
 * ENTRIES OR BAD THINGS WILL HAPPEN!
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};

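/*
 * Illustrative sketch, not part of the original header: a minimal
 * alloc_and_copy()/finalize_and_map() pair for a hypothetical driver,
 * following the protocol described in the comment above. Helper names such
 * as example_alloc_dst_page() and example_copy_page() are placeholders for
 * driver-specific logic.
 *
 *	static void example_alloc_and_copy(struct vm_area_struct *vma,
 *					   const unsigned long *src,
 *					   unsigned long *dst,
 *					   unsigned long start,
 *					   unsigned long end,
 *					   void *private)
 *	{
 *		unsigned long addr, i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;	// not a migration candidate
 *			dpage = example_alloc_dst_page(private, addr);
 *			if (!dpage)
 *				continue;	// leave dst[i] empty: skip it
 *			lock_page(dpage);
 *			if (spage)
 *				example_copy_page(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 *
 *	static void example_finalize_and_map(struct vm_area_struct *vma,
 *					     const unsigned long *src,
 *					     const unsigned long *dst,
 *					     unsigned long start,
 *					     unsigned long end,
 *					     void *private)
 *	{
 *		unsigned long addr, i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;	// this page did not migrate
 *			// migration succeeded: update device page tables here
 *		}
 *	}
 */
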
#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */
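
/*
 * Illustrative usage sketch, not part of the original header: wiring the
 * example callbacks above into a migrate_vma() call for one small, aligned
 * range of a VMA. The example_* names are placeholders for driver code.
 *
 *	static const struct migrate_vma_ops example_migrate_ops = {
 *		.alloc_and_copy		= example_alloc_and_copy,
 *		.finalize_and_map	= example_finalize_and_map,
 *	};
 *
 *	static int example_migrate_range(struct vm_area_struct *vma,
 *					 unsigned long start, unsigned long end,
 *					 void *private)
 *	{
 *		unsigned long src[64], dst[64];
 *
 *		if ((end - start) > 64 * PAGE_SIZE)
 *			return -EINVAL;	// sketch: one fixed-size batch only
 *		memset(src, 0, sizeof(src));
 *		memset(dst, 0, sizeof(dst));
 *		return migrate_vma(&example_migrate_ops, vma, start, end,
 *				   src, dst, private);
 *	}
 */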

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */