/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

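/*
 * A sanity check of the "~200 bytes" estimate above: each queued entry is a
 * (start, length) pair of uint64_t, so a full command carries
 * MAX_DISCARDS_PER_COMMAND * 2 * 8 = 192 bytes of range data, plus the
 * RAMBlock name and the command header.
 */
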
static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * NOTE: this routine is not thread safe, we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    qemu_sem_destroy(&mis->thread_sync_sem);
}

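/*
 * The handshake above relies on a convention: the function passed as 'fn'
 * must post mis->thread_sync_sem once its early setup is complete (both
 * postcopy_ram_fault_thread() and postcopy_preempt_thread() below do), so
 * postcopy_thread_create() only returns once the new thread is live.
 */
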
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for the exit event, necessary for
     * releasing the whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * This function populates MigrationInfo from postcopy's
 * blocktime context. It does nothing unless the
 * postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should have been checked before calling this
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *            in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on one fd -
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features, even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER on each
     * userfault file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->postcopy_prio_thread_created) {
        qemu_thread_join(&mis->postcopy_prio_thread);
        mis->postcopy_prio_thread_created = false;
    }

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification to unwritten areas.
 * Used as a callback on foreach_not_ignored_block.
 *   @rb: the RAMBlock to register
 *   @opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

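/*
 * Blocktime bookkeeping stores timestamps as 32-bit millisecond offsets
 * from dc->start_time rather than absolute times. The helper below clamps
 * the offset to a minimum of 1 because 0 is used elsewhere as the
 * "no outstanding fault" sentinel in vcpu_addr/page_fault_vcpu_time.
 */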
static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is being called when pagefault occurs. It
 * tracks down vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could otherwise happen earlier than bitmap_set in
     * qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function just provides the calculated blocktime per vCPU and traces
 * it. Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, as the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * The legend of the picture is as follows:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look the vCPU up in order to clear it. This algorithm is
     * straightforward but not optimal; a better one would keep a tree or
     * hash keyed by address, whose value is a list of vCPUs.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether this end was for a faulted page; the
         * other possible case is a prefetched page, and in that case we
         * shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}

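/*
 * Layout of the poll set built by the fault thread below: pfd[0] is the
 * main userfault_fd, pfd[1] is the quit eventfd posted by
 * postcopy_fault_thread_notify(), and pfd[2..] mirror
 * mis->postcopy_remote_fds for externally shared memory, which is why the
 * shared-fault loop indexes that GArray at (index - 2).
 */
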
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                    (uintptr_t)(msg.arg.pagefault.address),
                    msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = postcopy_request_page(mis, rb, rb_offset,
                                        msg.arg.pagefault.address);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                postcopy_pause_fault_thread(mis);
                goto retry;
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %ud "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}

static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    PostcopyTmpPage *tmp_page;
    int err, i, channels;
    void *temp_page;

    if (migrate_postcopy_preempt()) {
        /* If preemption enabled, need extra channel for urgent requests */
        mis->postcopy_channels = RAM_CHANNEL_MAX;
    } else {
        /* Both precopy/postcopy on the same channel */
        mis->postcopy_channels = 1;
    }

    channels = mis->postcopy_channels;
    mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);

    for (i = 0; i < channels; i++) {
        tmp_page = &mis->postcopy_tmp_pages[i];
        temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (temp_page == MAP_FAILED) {
            err = errno;
            error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
                         __func__, i, strerror(err));
            /* Clean up will be done later */
            return -err;
        }
        tmp_page->tmp_huge_page = temp_page;
        /* Initialize default states for each tmp page */
        postcopy_temp_page_reset(tmp_page);
    }

    /*
     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
     */
    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS,
                                       -1, 0);
    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_zero_page = NULL;
        error_report("%s: Failed to map large zero page %s",
                     __func__, strerror(err));
        return -err;
    }

    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

    return 0;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
                           postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    if (migrate_postcopy_preempt()) {
        /*
         * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
         */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
        mis->postcopy_prio_thread_created = true;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}

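/*
 * The helper below resolves a missing page either by copying real contents
 * (UFFDIO_COPY, when from_addr is set) or by zero-filling (UFFDIO_ZEROPAGE).
 * On success it updates the receive bitmap and the requested-page list under
 * page_request_mutex, and ends blocktime accounting for the address.
 */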
static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previous recorded faulted
         * address, take a special note to maintain the requested page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}

int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}

#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif

/* ------------------------------------------------------------------------- */
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
{
    tmp_page->target_pages = 0;
    tmp_page->host_addr = NULL;
    /*
     * This is set to true when reset, and cleared as soon as we receive any
     * non-zero small page within this huge page.
     */
    tmp_page->all_zero = true;
}

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}

static PostcopyDiscardState pds = {0};

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}
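
/*
 * Typical use of the three helpers above, per RAMBlock (a sketch of the
 * caller's pattern, not an additional API):
 *
 *   postcopy_discard_send_init(ms, block_name);
 *   for each run of dirty-bitmap pages to drop:
 *       postcopy_discard_send_range(ms, start_page, num_pages);
 *   postcopy_discard_send_finish(ms);
 *
 * Ranges accumulate in the static pds and are flushed every
 * MAX_DISCARDS_PER_COMMAND entries, with a final flush in _finish().
 */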

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return qatomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}

/* Register a handler for external shared memory postcopy
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    if (!pcrfds) {
        /* migration has already finished and freed the array */
        return;
    }
    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}

bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
{
    /*
     * The new loading channel has its own threads, so it needs to be
     * blocked too. It's by default true, just be explicit.
     */
    qemu_file_set_blocking(file, true);
    mis->postcopy_qemufile_dst = file;
    trace_postcopy_preempt_new_channel();

    /* Start the migration immediately */
    return true;
}

/*
 * Setup the postcopy preempt channel with the IOC. If ERROR is specified,
 * setup the error instead. This helper will free the ERROR if specified.
 */
static void
postcopy_preempt_send_channel_done(MigrationState *s,
                                   QIOChannel *ioc, Error *local_err)
{
    if (local_err) {
        migrate_set_error(s, local_err);
        error_free(local_err);
    } else {
        migration_ioc_register_yank(ioc);
        s->postcopy_qemufile_src = qemu_file_new_output(ioc);
        trace_postcopy_preempt_new_channel();
    }

    /*
     * Kick the waiter in all cases. The waiter should check upon
     * postcopy_qemufile_src to know whether it failed or not.
     */
    qemu_sem_post(&s->postcopy_qemufile_src_sem);
}

static void
postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    Error *local_err = NULL;

    qio_task_propagate_error(task, &local_err);
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

static void
postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    QIOChannelTLS *tioc;
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        goto out;
    }

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err);
        if (!tioc) {
            goto out;
        }
        trace_postcopy_preempt_tls_handshake();
        qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt");
        qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake,
                                  s, NULL, NULL);
        /* Defer the rest of the channel setup until the TLS handshake is done */
        return;
    }

out:
    /* This handles both good and error cases */
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}

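/*
 * Connection flow for the preempt channel (a summary of the code above):
 * postcopy_preempt_setup() kicks socket_send_channel_create(), whose
 * callback postcopy_preempt_send_channel_new() optionally layers a TLS
 * handshake on top; both paths end in postcopy_preempt_send_channel_done(),
 * which posts postcopy_qemufile_src_sem for
 * postcopy_preempt_wait_channel() below.
 */
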
/* Returns 0 if channel established, -1 for error. */
int postcopy_preempt_wait_channel(MigrationState *s)
{
    /* If preempt not enabled, no need to wait */
    if (!migrate_postcopy_preempt()) {
        return 0;
    }

    /*
     * We need the postcopy preempt channel to be established before
     * starting doing anything.
     */
    qemu_sem_wait(&s->postcopy_qemufile_src_sem);

    return s->postcopy_qemufile_src ? 0 : -1;
}

int postcopy_preempt_setup(MigrationState *s, Error **errp)
{
    if (!migrate_postcopy_preempt()) {
        return 0;
    }

    if (!migrate_multi_channels_is_allowed()) {
        error_setg(errp, "Postcopy preempt is not supported as current "
                   "migration stream does not support multi-channels.");
        return -1;
    }

    /* Kick an async task to connect */
    socket_send_channel_create(postcopy_preempt_send_channel_new, s);

    return 0;
}

static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fast_load();
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    trace_postcopy_pause_fast_load_continued();
}

void *postcopy_preempt_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    int ret;

    trace_postcopy_preempt_thread_entry();

    rcu_register_thread();

    qemu_sem_post(&mis->thread_sync_sem);

    /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    while (1) {
        ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
                                RAM_CHANNEL_POSTCOPY);
        /* If error happened, go into recovery routine */
        if (ret) {
            postcopy_pause_ram_fast_load(mis);
        } else {
            /* We're done */
            break;
        }
    }
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);

    rcu_unregister_thread();

    trace_postcopy_preempt_thread_exit();

    return NULL;
}