/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
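
/*
 * Illustrative sketch only (not part of this file's logic): the
 * destination-side userfaultfd flow implemented below is, in outline:
 *
 *   int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *   struct uffdio_api api = { .api = UFFD_API };
 *   ioctl(ufd, UFFDIO_API, &api);        // ABI/feature handshake
 *   struct uffdio_register reg = {
 *       .range = { .start = (uintptr_t)ram, .len = ram_len },
 *       .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *   };
 *   ioctl(ufd, UFFDIO_REGISTER, &reg);   // get faults for missing pages
 *   // ...read(ufd, &msg, sizeof(msg)) in a thread, ask the source for the
 *   // page, then resolve the fault with UFFDIO_COPY or UFFDIO_ZEROPAGE.
 *
 * "ram"/"ram_len" are placeholders here; the real registration happens per
 * RAMBlock in ram_block_enable_notify() below.
 */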

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
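
/*
 * Illustrative arithmetic behind the "~200 bytes" figure above: each
 * command carries at most MAX_DISCARDS_PER_COMMAND (start, length) pairs
 * of uint64_t, i.e. 12 * 2 * 8 = 192 bytes of payload, plus the small
 * savevm command header and the RAMBlock name.
 */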

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing
 * this are target-OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    int64_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    int64_t total_blocktime;
    /* blocktime per vCPU */
    int64_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    int64_t last_begin;
    /* number of vCPUs that are currently suspended */
    int smp_cpus_down;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(int64_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(int64_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static int64List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    int64List *list = NULL, *entry = NULL;
    int i;

    for (i = smp_cpus - 1; i >= 0; i--) {
        entry = g_new0(int64List, 1);
        entry->value = ctx->vcpu_blocktime[i];
        entry->next = list;
        list = entry;
    }

    return list;
}

/*
 * Populate MigrationInfo from postcopy's blocktime context.
 * MigrationInfo is left untouched unless the postcopy-blocktime
 * capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint64_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * The availability of __NR_userfaultfd should be checked beforehand.
 * @features: out parameter; on success contains uffdio_api.features as
 *            provided by the kernel
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * it's not possible to request UFFD_API twice per fd -
     * userfault fd features are persistent
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports that feature */
        /* don't create blocktime_ctx if it already exists */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * request features, even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER, once per userfault
     * file descriptor
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
        get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard
 * however leaving it until after precopy means that most of the precopy
 * data is still THPd
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

/*
 * This function is called when a page fault occurs. It tracks the start
 * of the time a vCPU spends blocked.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int64_t now_ms;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg__nocheck(&dc->last_begin, now_ms);
    atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], now_ms);
    atomic_xchg__nocheck(&dc->vcpu_addr[cpu], addr);

    /* check it here, not at the beginning of the function,
     * because this check could happen earlier than bitmap_set in
     * qemu_ufd_copy_ioctl */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg__nocheck(&dc->vcpu_addr[cpu], 0);
        atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function provides the calculated blocktime per vCPU and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1
 *         doesn't include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is the following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int i, affected_cpu = 0;
    int64_t now_ms;
    bool vcpu_total_blocktime = false;
    int64_t read_vcpu_time;

    if (!dc) {
        return;
    }

    now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* lookup cpu, to clear it;
     * that algorithm looks straightforward, but it's not optimal - a more
     * optimal algorithm would keep a tree or hash where the key is the
     * address and the value is a list of vCPUs */
    for (i = 0; i < smp_cpus; i++) {
        uint64_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg__nocheck(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = now_ms - read_vcpu_time;
        affected_cpu += 1;
        /* we need to know whether mark_postcopy_blocktime_end was called
         * for a faulted page; the other possible case is a prefetched
         * page, and in that case we shouldn't be here */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, because one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += now_ms - atomic_fetch_add(&dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
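
/*
 * Worked example for the scheme above (illustrative numbers only): with
 * 2 vCPUs, vCPU0 faults at t=10ms and vCPU1 at t=12ms on the same page,
 * so smp_cpus_down reaches 2 and last_begin = 12. If the page arrives at
 * t=20ms, each vcpu_blocktime[i] grows by 20 - page_fault_vcpu_time[i]
 * (10ms and 8ms respectively), while total_blocktime grows only by
 * 20 - last_begin = 8ms - the interval where *all* vCPUs were blocked.
 */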

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                msg.arg.pagefault.feat.ptid);

        mark_postcopy_blocktime_begin((uintptr_t)(msg.arg.pagefault.address),
                                      msg.arg.pagefault.feat.ptid, rb);
        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
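
/*
 * For orientation, the rough incoming-side call order of the functions in
 * this file (as described by their own comments) is:
 * postcopy_ram_supported_by_host() when the capability is checked,
 * postcopy_ram_incoming_init() before precopy starts,
 * postcopy_ram_prepare_discard() before the discard requests arrive,
 * postcopy_ram_enable_notify() at the switch into postcopy, then
 * postcopy_place_page()/postcopy_place_page_zero() per incoming page, and
 * postcopy_ram_incoming_cleanup() at the end.
 */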

static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int ret;
    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        mark_postcopy_blocktime_end((uintptr_t)host_addr);

    }
    return ret;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    trace_postcopy_place_page_zero(host);

    if (qemu_ram_pagesize(rb) == getpagesize()) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize(),
                                rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 *
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
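
/*
 * Typical destination-side usage (an illustrative sketch, not a fixed
 * API contract):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   ...fill tmp with the incoming page's data...
 *   postcopy_place_page(mis, host_addr, tmp, rb);
 *
 * The same tmp mapping is then reused for the next incoming page.
 */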

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    /* g_malloc0() aborts on allocation failure, so res is always valid */
    res->ramblock_name = name;

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
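
/*
 * The three functions above form an init/accumulate/flush API. A sketch of
 * how the bitmap code drives it for one RAMBlock (the loop shape is
 * illustrative):
 *
 *   PostcopyDiscardState *pds = postcopy_discard_send_init(ms, rb_name);
 *   ...for each run of pages to discard, [start, start + length)...
 *       postcopy_discard_send_range(ms, pds, start, length);
 *   postcopy_discard_send_finish(ms, pds);  // flushes any partial batch
 */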

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}