migration: Switch to COLO process after finishing loadvm
[mirror_qemu.git] / migration / migration.c
1 /*
2 * QEMU live migration
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/migration.h"
21 #include "migration/qemu-file.h"
22 #include "sysemu/sysemu.h"
23 #include "block/block.h"
24 #include "qapi/qmp/qerror.h"
25 #include "qapi/util.h"
26 #include "qemu/sockets.h"
27 #include "qemu/rcu.h"
28 #include "migration/block.h"
29 #include "migration/postcopy-ram.h"
30 #include "qemu/thread.h"
31 #include "qmp-commands.h"
32 #include "trace.h"
33 #include "qapi-event.h"
34 #include "qom/cpu.h"
35 #include "exec/memory.h"
36 #include "exec/address-spaces.h"
37 #include "io/channel-buffer.h"
38 #include "io/channel-tls.h"
39 #include "migration/colo.h"
40
41 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
42
43 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
44 * data. */
45 #define BUFFER_DELAY 100
46 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
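/*
 * For illustration: with BUFFER_DELAY = 100 ms, XFER_LIMIT_RATIO is
 * 1000 / 100 = 10, so a max_bandwidth of B bytes/s is applied as a limit of
 * B / 10 bytes per 100 ms chunk; the default MAX_THROTTLE of 32 MiB/s thus
 * becomes roughly 3.2 MiB per chunk (see the qemu_file_set_rate_limit()
 * callers below).
 */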
47
48 /* Time in milliseconds we are allowed to stop the source,
49 * for sending the last part */
50 #define DEFAULT_MIGRATE_SET_DOWNTIME 300
51
52 /* Default compression thread count */
53 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
54 /* Default decompression thread count, usually decompression is at
55 * least 4 times as fast as compression. */
56 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
57 /* 0: no compression, 1: best speed, ... 9: best compression ratio */
58 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
59 /* Define default autoconverge cpu throttle migration parameters */
60 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
61 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
62
63 /* Migration XBZRLE default cache size */
64 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
65
66 static NotifierList migration_state_notifiers =
67 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
68
69 static bool deferred_incoming;
70
71 /*
72 * Current state of incoming postcopy; note this is not part of
73 * MigrationIncomingState since its state is used during cleanup
74 * at the end as MIS is being freed.
75 */
76 static PostcopyState incoming_postcopy_state;
77
78 /* When we add fault tolerance, we could have several
79 migrations at once. For now we don't need to support
80 dynamic creation of migration state. */
81
82 /* For outgoing */
83 MigrationState *migrate_get_current(void)
84 {
85 static bool once;
86 static MigrationState current_migration = {
87 .state = MIGRATION_STATUS_NONE,
88 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
89 .mbps = -1,
90 .parameters = {
91 .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
92 .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
93 .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
94 .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
95 .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
96 .max_bandwidth = MAX_THROTTLE,
97 .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
98 },
99 };
100
101 if (!once) {
102 qemu_mutex_init(&current_migration.src_page_req_mutex);
103 once = true;
104 }
105 return &current_migration;
106 }
107
108 /* For incoming */
109 static MigrationIncomingState *mis_current;
110
111 MigrationIncomingState *migration_incoming_get_current(void)
112 {
113 return mis_current;
114 }
115
116 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
117 {
118 mis_current = g_new0(MigrationIncomingState, 1);
119 mis_current->from_src_file = f;
120 mis_current->state = MIGRATION_STATUS_NONE;
121 QLIST_INIT(&mis_current->loadvm_handlers);
122 qemu_mutex_init(&mis_current->rp_mutex);
123 qemu_event_init(&mis_current->main_thread_load_event, false);
124
125 return mis_current;
126 }
127
128 void migration_incoming_state_destroy(void)
129 {
130 qemu_event_destroy(&mis_current->main_thread_load_event);
131 loadvm_free_handlers(mis_current);
132 g_free(mis_current);
133 mis_current = NULL;
134 }
135
136
137 typedef struct {
138 bool optional;
139 uint32_t size;
140 uint8_t runstate[100];
141 RunState state;
142 bool received;
143 } GlobalState;
144
145 static GlobalState global_state;
146
147 int global_state_store(void)
148 {
149 if (!runstate_store((char *)global_state.runstate,
150 sizeof(global_state.runstate))) {
151 error_report("runstate name too big: %s", global_state.runstate);
152 trace_migrate_state_too_big();
153 return -EINVAL;
154 }
155 return 0;
156 }
157
158 void global_state_store_running(void)
159 {
160 const char *state = RunState_lookup[RUN_STATE_RUNNING];
161 strncpy((char *)global_state.runstate,
162 state, sizeof(global_state.runstate));
163 }
164
165 static bool global_state_received(void)
166 {
167 return global_state.received;
168 }
169
170 static RunState global_state_get_runstate(void)
171 {
172 return global_state.state;
173 }
174
175 void global_state_set_optional(void)
176 {
177 global_state.optional = true;
178 }
179
180 static bool global_state_needed(void *opaque)
181 {
182 GlobalState *s = opaque;
183 char *runstate = (char *)s->runstate;
184
185 /* If it is not optional, it is mandatory */
186
187 if (s->optional == false) {
188 return true;
189 }
190
191 /* If state is running or paused, it is not needed */
192
193 if (strcmp(runstate, "running") == 0 ||
194 strcmp(runstate, "paused") == 0) {
195 return false;
196 }
197
198 /* for any other state it is needed */
199 return true;
200 }
201
202 static int global_state_post_load(void *opaque, int version_id)
203 {
204 GlobalState *s = opaque;
205 Error *local_err = NULL;
206 int r;
207 char *runstate = (char *)s->runstate;
208
209 s->received = true;
210 trace_migrate_global_state_post_load(runstate);
211
212 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
213 -1, &local_err);
214
215 if (r == -1) {
216 if (local_err) {
217 error_report_err(local_err);
218 }
219 return -EINVAL;
220 }
221 s->state = r;
222
223 return 0;
224 }
225
226 static void global_state_pre_save(void *opaque)
227 {
228 GlobalState *s = opaque;
229
230 trace_migrate_global_state_pre_save((char *)s->runstate);
231 s->size = strlen((char *)s->runstate) + 1;
232 }
233
234 static const VMStateDescription vmstate_globalstate = {
235 .name = "globalstate",
236 .version_id = 1,
237 .minimum_version_id = 1,
238 .post_load = global_state_post_load,
239 .pre_save = global_state_pre_save,
240 .needed = global_state_needed,
241 .fields = (VMStateField[]) {
242 VMSTATE_UINT32(size, GlobalState),
243 VMSTATE_BUFFER(runstate, GlobalState),
244 VMSTATE_END_OF_LIST()
245 },
246 };
247
248 void register_global_state(void)
249 {
250 /* We use it regardless of whether we receive it */
251 strcpy((char *)&global_state.runstate, "");
252 global_state.received = false;
253 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
254 }
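/*
 * A minimal sketch of what the "globalstate" section carries: the runstate
 * name is stored as a NUL-terminated string in global_state.runstate (for
 * example "running", "paused" or "postmigrate"), its length goes into
 * global_state.size in global_state_pre_save(), and on the destination
 * global_state_post_load() parses it back into a RunState via
 * qapi_enum_parse().
 */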
255
256 static void migrate_generate_event(int new_state)
257 {
258 if (migrate_use_events()) {
259 qapi_event_send_migration(new_state, &error_abort);
260 }
261 }
262
263 /*
264 * Called on -incoming with a defer: uri.
265 * The migration can be started later after any parameters have been
266 * changed.
267 */
268 static void deferred_incoming_migration(Error **errp)
269 {
270 if (deferred_incoming) {
271 error_setg(errp, "Incoming migration already deferred");
272 }
273 deferred_incoming = true;
274 }
275
276 /* Request a range of pages from the source VM at the given
277 * start address.
278 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
279 * as the last request (a name must have been given previously)
280 * Start: Address offset within the RB
281 * Len: Length in bytes required - must be a multiple of pagesize
282 */
283 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
284 ram_addr_t start, size_t len)
285 {
286 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
287 size_t msglen = 12; /* start + len */
288
289 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
290 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
291
292 if (rbname) {
293 int rbname_len = strlen(rbname);
294 assert(rbname_len < 256);
295
296 bufc[msglen++] = rbname_len;
297 memcpy(bufc + msglen, rbname, rbname_len);
298 msglen += rbname_len;
299 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
300 } else {
301 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
302 }
303 }
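/*
 * For reference, the request body built above is laid out as parsed by
 * source_return_path_thread() on the source side:
 *   bytes  0-7   start offset within the RAMBlock, big-endian
 *   bytes  8-11  length in bytes, big-endian
 *   byte   12    length of rbname          (MIG_RP_MSG_REQ_PAGES_ID only)
 *   bytes  13..  rbname, not terminated    (MIG_RP_MSG_REQ_PAGES_ID only)
 */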
304
305 void qemu_start_incoming_migration(const char *uri, Error **errp)
306 {
307 const char *p;
308
309 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
310 if (!strcmp(uri, "defer")) {
311 deferred_incoming_migration(errp);
312 } else if (strstart(uri, "tcp:", &p)) {
313 tcp_start_incoming_migration(p, errp);
314 #ifdef CONFIG_RDMA
315 } else if (strstart(uri, "rdma:", &p)) {
316 rdma_start_incoming_migration(p, errp);
317 #endif
318 } else if (strstart(uri, "exec:", &p)) {
319 exec_start_incoming_migration(p, errp);
320 } else if (strstart(uri, "unix:", &p)) {
321 unix_start_incoming_migration(p, errp);
322 } else if (strstart(uri, "fd:", &p)) {
323 fd_start_incoming_migration(p, errp);
324 } else {
325 error_setg(errp, "unknown migration protocol: %s", uri);
326 }
327 }
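/*
 * A sketch of the incoming URIs handled above (the exact sub-syntax is
 * parsed by the respective *_start_incoming_migration() helpers):
 *   -incoming defer
 *   -incoming tcp:<host>:<port>
 *   -incoming rdma:<host>:<port>      (only with CONFIG_RDMA)
 *   -incoming unix:<path to socket>
 *   -incoming exec:<command>
 *   -incoming fd:<file descriptor>
 */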
328
329 static void process_incoming_migration_bh(void *opaque)
330 {
331 Error *local_err = NULL;
332 MigrationIncomingState *mis = opaque;
333
334 /* Make sure all file formats flush their mutable metadata */
335 bdrv_invalidate_cache_all(&local_err);
336 if (local_err) {
337 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
338 MIGRATION_STATUS_FAILED);
339 error_report_err(local_err);
340 migrate_decompress_threads_join();
341 exit(EXIT_FAILURE);
342 }
343
344 /*
345 * This must happen after all error conditions are dealt with and
346 * we're sure the VM is going to be running on this host.
347 */
348 qemu_announce_self();
349
350 /* If global state section was not received or we are in running
351 state, we need to obey autostart. Any other state is set with
352 runstate_set. */
353
354 if (!global_state_received() ||
355 global_state_get_runstate() == RUN_STATE_RUNNING) {
356 if (autostart) {
357 vm_start();
358 } else {
359 runstate_set(RUN_STATE_PAUSED);
360 }
361 } else {
362 runstate_set(global_state_get_runstate());
363 }
364 migrate_decompress_threads_join();
365 /*
366 * This must happen after any state changes since as soon as an external
367 * observer sees this event they might start to prod at the VM assuming
368 * it's ready to use.
369 */
370 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
371 MIGRATION_STATUS_COMPLETED);
372 qemu_bh_delete(mis->bh);
373 migration_incoming_state_destroy();
374 }
375
376 static void process_incoming_migration_co(void *opaque)
377 {
378 QEMUFile *f = opaque;
379 MigrationIncomingState *mis;
380 PostcopyState ps;
381 int ret;
382
383 mis = migration_incoming_state_new(f);
384 postcopy_state_set(POSTCOPY_INCOMING_NONE);
385 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
386 MIGRATION_STATUS_ACTIVE);
387 ret = qemu_loadvm_state(f);
388
389 ps = postcopy_state_get();
390 trace_process_incoming_migration_co_end(ret, ps);
391 if (ps != POSTCOPY_INCOMING_NONE) {
392 if (ps == POSTCOPY_INCOMING_ADVISE) {
393 /*
394 * Where a migration had postcopy enabled (and thus went to advise)
395 * but managed to complete within the precopy period, we can use
396 * the normal exit.
397 */
398 postcopy_ram_incoming_cleanup(mis);
399 } else if (ret >= 0) {
400 /*
401 * Postcopy was started, cleanup should happen at the end of the
402 * postcopy thread.
403 */
404 trace_process_incoming_migration_co_postcopy_end_main();
405 return;
406 }
407 /* Otherwise, if something went wrong, just fall through to the normal exit */
408 }
409
410 /* We have the COLO info by now, and know whether we are in COLO mode */
411 if (!ret && migration_incoming_enable_colo()) {
412 mis->migration_incoming_co = qemu_coroutine_self();
413 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
414 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
415 mis->have_colo_incoming_thread = true;
416 qemu_coroutine_yield();
417
418 /* Wait for the COLO incoming (checkpoint) thread to exit before freeing resources */
419 qemu_thread_join(&mis->colo_incoming_thread);
420 }
421
422 qemu_fclose(f);
423 free_xbzrle_decoded_buf();
424
425 if (ret < 0) {
426 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
427 MIGRATION_STATUS_FAILED);
428 error_report("load of migration failed: %s", strerror(-ret));
429 migrate_decompress_threads_join();
430 exit(EXIT_FAILURE);
431 }
432
433 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
434 qemu_bh_schedule(mis->bh);
435 }
436
437 void migration_fd_process_incoming(QEMUFile *f)
438 {
439 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
440
441 migrate_decompress_threads_create();
442 qemu_file_set_blocking(f, false);
443 qemu_coroutine_enter(co);
444 }
445
446
447 void migration_channel_process_incoming(MigrationState *s,
448 QIOChannel *ioc)
449 {
450 trace_migration_set_incoming_channel(
451 ioc, object_get_typename(OBJECT(ioc)));
452
453 if (s->parameters.tls_creds &&
454 !object_dynamic_cast(OBJECT(ioc),
455 TYPE_QIO_CHANNEL_TLS)) {
456 Error *local_err = NULL;
457 migration_tls_channel_process_incoming(s, ioc, &local_err);
458 if (local_err) {
459 error_report_err(local_err);
460 }
461 } else {
462 QEMUFile *f = qemu_fopen_channel_input(ioc);
463 migration_fd_process_incoming(f);
464 }
465 }
466
467
468 void migration_channel_connect(MigrationState *s,
469 QIOChannel *ioc,
470 const char *hostname)
471 {
472 trace_migration_set_outgoing_channel(
473 ioc, object_get_typename(OBJECT(ioc)), hostname);
474
475 if (s->parameters.tls_creds &&
476 !object_dynamic_cast(OBJECT(ioc),
477 TYPE_QIO_CHANNEL_TLS)) {
478 Error *local_err = NULL;
479 migration_tls_channel_connect(s, ioc, hostname, &local_err);
480 if (local_err) {
481 migrate_fd_error(s, local_err);
482 error_free(local_err);
483 }
484 } else {
485 QEMUFile *f = qemu_fopen_channel_output(ioc);
486
487 s->to_dst_file = f;
488
489 migrate_fd_connect(s);
490 }
491 }
492
493
494 /*
495 * Send a message on the return channel back to the source
496 * of the migration.
497 */
498 void migrate_send_rp_message(MigrationIncomingState *mis,
499 enum mig_rp_message_type message_type,
500 uint16_t len, void *data)
501 {
502 trace_migrate_send_rp_message((int)message_type, len);
503 qemu_mutex_lock(&mis->rp_mutex);
504 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
505 qemu_put_be16(mis->to_src_file, len);
506 qemu_put_buffer(mis->to_src_file, data, len);
507 qemu_fflush(mis->to_src_file);
508 qemu_mutex_unlock(&mis->rp_mutex);
509 }
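/*
 * Each return-path message is framed as a big-endian 16-bit message type,
 * a big-endian 16-bit payload length, then 'len' bytes of payload; the
 * rp_mutex keeps concurrent senders from interleaving their frames.
 * source_return_path_thread() on the source reads frames back in exactly
 * this order.
 */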
510
511 /*
512 * Send a 'SHUT' message on the return channel with the given value
513 * to indicate that we've finished with the RP. Non-0 value indicates
514 * error.
515 */
516 void migrate_send_rp_shut(MigrationIncomingState *mis,
517 uint32_t value)
518 {
519 uint32_t buf;
520
521 buf = cpu_to_be32(value);
522 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
523 }
524
525 /*
526 * Send a 'PONG' message on the return channel with the given value
527 * (normally in response to a 'PING')
528 */
529 void migrate_send_rp_pong(MigrationIncomingState *mis,
530 uint32_t value)
531 {
532 uint32_t buf;
533
534 buf = cpu_to_be32(value);
535 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
536 }
537
538 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
539 {
540 MigrationCapabilityStatusList *head = NULL;
541 MigrationCapabilityStatusList *caps;
542 MigrationState *s = migrate_get_current();
543 int i;
544
545 caps = NULL; /* silence compiler warning */
546 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
547 if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
548 continue;
549 }
550 if (head == NULL) {
551 head = g_malloc0(sizeof(*caps));
552 caps = head;
553 } else {
554 caps->next = g_malloc0(sizeof(*caps));
555 caps = caps->next;
556 }
557 caps->value =
558 g_malloc(sizeof(*caps->value));
559 caps->value->capability = i;
560 caps->value->state = s->enabled_capabilities[i];
561 }
562
563 return head;
564 }
565
566 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
567 {
568 MigrationParameters *params;
569 MigrationState *s = migrate_get_current();
570
571 params = g_malloc0(sizeof(*params));
572 params->has_compress_level = true;
573 params->compress_level = s->parameters.compress_level;
574 params->has_compress_threads = true;
575 params->compress_threads = s->parameters.compress_threads;
576 params->has_decompress_threads = true;
577 params->decompress_threads = s->parameters.decompress_threads;
578 params->has_cpu_throttle_initial = true;
579 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
580 params->has_cpu_throttle_increment = true;
581 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
582 params->has_tls_creds = !!s->parameters.tls_creds;
583 params->tls_creds = g_strdup(s->parameters.tls_creds);
584 params->has_tls_hostname = !!s->parameters.tls_hostname;
585 params->tls_hostname = g_strdup(s->parameters.tls_hostname);
586 params->has_max_bandwidth = true;
587 params->max_bandwidth = s->parameters.max_bandwidth;
588 params->has_downtime_limit = true;
589 params->downtime_limit = s->parameters.downtime_limit;
590
591 return params;
592 }
593
594 /*
595 * Return true if we're already in the middle of a migration
596 * (i.e. any of the active or setup states)
597 */
598 static bool migration_is_setup_or_active(int state)
599 {
600 switch (state) {
601 case MIGRATION_STATUS_ACTIVE:
602 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
603 case MIGRATION_STATUS_SETUP:
604 return true;
605
606 default:
607 return false;
608
609 }
610 }
611
612 static void get_xbzrle_cache_stats(MigrationInfo *info)
613 {
614 if (migrate_use_xbzrle()) {
615 info->has_xbzrle_cache = true;
616 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
617 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
618 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
619 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
620 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
621 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
622 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
623 }
624 }
625
626 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
627 {
628 info->has_ram = true;
629 info->ram = g_malloc0(sizeof(*info->ram));
630 info->ram->transferred = ram_bytes_transferred();
631 info->ram->total = ram_bytes_total();
632 info->ram->duplicate = dup_mig_pages_transferred();
633 info->ram->skipped = skipped_mig_pages_transferred();
634 info->ram->normal = norm_mig_pages_transferred();
635 info->ram->normal_bytes = norm_mig_bytes_transferred();
636 info->ram->mbps = s->mbps;
637 info->ram->dirty_sync_count = s->dirty_sync_count;
638 info->ram->postcopy_requests = s->postcopy_requests;
639
640 if (s->state != MIGRATION_STATUS_COMPLETED) {
641 info->ram->remaining = ram_bytes_remaining();
642 info->ram->dirty_pages_rate = s->dirty_pages_rate;
643 }
644 }
645
646 MigrationInfo *qmp_query_migrate(Error **errp)
647 {
648 MigrationInfo *info = g_malloc0(sizeof(*info));
649 MigrationState *s = migrate_get_current();
650
651 switch (s->state) {
652 case MIGRATION_STATUS_NONE:
653 /* no migration has happened ever */
654 break;
655 case MIGRATION_STATUS_SETUP:
656 info->has_status = true;
657 info->has_total_time = false;
658 break;
659 case MIGRATION_STATUS_ACTIVE:
660 case MIGRATION_STATUS_CANCELLING:
661 info->has_status = true;
662 info->has_total_time = true;
663 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
664 - s->total_time;
665 info->has_expected_downtime = true;
666 info->expected_downtime = s->expected_downtime;
667 info->has_setup_time = true;
668 info->setup_time = s->setup_time;
669
670 populate_ram_info(info, s);
671
672 if (blk_mig_active()) {
673 info->has_disk = true;
674 info->disk = g_malloc0(sizeof(*info->disk));
675 info->disk->transferred = blk_mig_bytes_transferred();
676 info->disk->remaining = blk_mig_bytes_remaining();
677 info->disk->total = blk_mig_bytes_total();
678 }
679
680 if (cpu_throttle_active()) {
681 info->has_cpu_throttle_percentage = true;
682 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
683 }
684
685 get_xbzrle_cache_stats(info);
686 break;
687 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
688 /* Mostly the same as active; TODO add some postcopy stats */
689 info->has_status = true;
690 info->has_total_time = true;
691 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
692 - s->total_time;
693 info->has_expected_downtime = true;
694 info->expected_downtime = s->expected_downtime;
695 info->has_setup_time = true;
696 info->setup_time = s->setup_time;
697
698 populate_ram_info(info, s);
699
700 if (blk_mig_active()) {
701 info->has_disk = true;
702 info->disk = g_malloc0(sizeof(*info->disk));
703 info->disk->transferred = blk_mig_bytes_transferred();
704 info->disk->remaining = blk_mig_bytes_remaining();
705 info->disk->total = blk_mig_bytes_total();
706 }
707
708 get_xbzrle_cache_stats(info);
709 break;
710 case MIGRATION_STATUS_COLO:
711 info->has_status = true;
712 /* TODO: display COLO specific information (checkpoint info etc.) */
713 break;
714 case MIGRATION_STATUS_COMPLETED:
715 get_xbzrle_cache_stats(info);
716
717 info->has_status = true;
718 info->has_total_time = true;
719 info->total_time = s->total_time;
720 info->has_downtime = true;
721 info->downtime = s->downtime;
722 info->has_setup_time = true;
723 info->setup_time = s->setup_time;
724
725 populate_ram_info(info, s);
726 break;
727 case MIGRATION_STATUS_FAILED:
728 info->has_status = true;
729 if (s->error) {
730 info->has_error_desc = true;
731 info->error_desc = g_strdup(error_get_pretty(s->error));
732 }
733 break;
734 case MIGRATION_STATUS_CANCELLED:
735 info->has_status = true;
736 break;
737 }
738 info->status = s->state;
739
740 return info;
741 }
742
743 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
744 Error **errp)
745 {
746 MigrationState *s = migrate_get_current();
747 MigrationCapabilityStatusList *cap;
748 bool old_postcopy_cap = migrate_postcopy_ram();
749
750 if (migration_is_setup_or_active(s->state)) {
751 error_setg(errp, QERR_MIGRATION_ACTIVE);
752 return;
753 }
754
755 for (cap = params; cap; cap = cap->next) {
756 if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
757 if (!colo_supported()) {
758 error_setg(errp, "COLO is not currently supported, please"
759 " configure with --enable-colo option in order to"
760 " support COLO feature");
761 continue;
762 }
763 }
764 s->enabled_capabilities[cap->value->capability] = cap->value->state;
765 }
766
767 if (migrate_postcopy_ram()) {
768 if (migrate_use_compression()) {
769 /* The decompression threads asynchronously write into RAM
770 * rather than use the atomic copies needed to avoid
771 * userfaulting. It should be possible to fix the decompression
772 * threads for compatibility in future.
773 */
774 error_report("Postcopy is not currently compatible with "
775 "compression");
776 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
777 false;
778 }
779 /* This check is reasonably expensive, so only when it's being
780 * set the first time, also it's only the destination that needs
781 * special support.
782 */
783 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
784 !postcopy_ram_supported_by_host()) {
785 /* postcopy_ram_supported_by_host will have emitted a more
786 * detailed message
787 */
788 error_report("Postcopy is not supported");
789 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
790 false;
791 }
792 }
793 }
794
795 void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
796 {
797 MigrationState *s = migrate_get_current();
798
799 if (params->has_compress_level &&
800 (params->compress_level < 0 || params->compress_level > 9)) {
801 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
802 "is invalid, it should be in the range of 0 to 9");
803 return;
804 }
805 if (params->has_compress_threads &&
806 (params->compress_threads < 1 || params->compress_threads > 255)) {
807 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
808 "compress_threads",
809 "is invalid, it should be in the range of 1 to 255");
810 return;
811 }
812 if (params->has_decompress_threads &&
813 (params->decompress_threads < 1 || params->decompress_threads > 255)) {
814 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
815 "decompress_threads",
816 "is invalid, it should be in the range of 1 to 255");
817 return;
818 }
819 if (params->has_cpu_throttle_initial &&
820 (params->cpu_throttle_initial < 1 ||
821 params->cpu_throttle_initial > 99)) {
822 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
823 "cpu_throttle_initial",
824 "an integer in the range of 1 to 99");
825 return;
826 }
827 if (params->has_cpu_throttle_increment &&
828 (params->cpu_throttle_increment < 1 ||
829 params->cpu_throttle_increment > 99)) {
830 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
831 "cpu_throttle_increment",
832 "an integer in the range of 1 to 99");
833 return;
834 }
835 if (params->has_max_bandwidth &&
836 (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
837 error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
838 " range of 0 to %zu bytes/second", SIZE_MAX);
839 return;
840 }
841 if (params->has_downtime_limit &&
842 (params->downtime_limit < 0 || params->downtime_limit > 2000000)) {
843 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
844 "downtime_limit",
845 "an integer in the range of 0 to 2000000 milliseconds");
846 return;
847 }
848
849 if (params->has_compress_level) {
850 s->parameters.compress_level = params->compress_level;
851 }
852 if (params->has_compress_threads) {
853 s->parameters.compress_threads = params->compress_threads;
854 }
855 if (params->has_decompress_threads) {
856 s->parameters.decompress_threads = params->decompress_threads;
857 }
858 if (params->has_cpu_throttle_initial) {
859 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
860 }
861 if (params->has_cpu_throttle_increment) {
862 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
863 }
864 if (params->has_tls_creds) {
865 g_free(s->parameters.tls_creds);
866 s->parameters.tls_creds = g_strdup(params->tls_creds);
867 }
868 if (params->has_tls_hostname) {
869 g_free(s->parameters.tls_hostname);
870 s->parameters.tls_hostname = g_strdup(params->tls_hostname);
871 }
872 if (params->has_max_bandwidth) {
873 s->parameters.max_bandwidth = params->max_bandwidth;
874 if (s->to_dst_file) {
875 qemu_file_set_rate_limit(s->to_dst_file,
876 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
877 }
878 }
879 if (params->has_downtime_limit) {
880 s->parameters.downtime_limit = params->downtime_limit;
881 }
882 }
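/*
 * A sketch of how this is typically driven over QMP (parameter names follow
 * the QAPI schema, with dashes):
 *   { "execute": "migrate-set-parameters",
 *     "arguments": { "compress-level": 1,
 *                    "max-bandwidth": 33554432,
 *                    "downtime-limit": 300 } }
 */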
883
884
885 void qmp_migrate_start_postcopy(Error **errp)
886 {
887 MigrationState *s = migrate_get_current();
888
889 if (!migrate_postcopy_ram()) {
890 error_setg(errp, "Enable postcopy with migrate_set_capability before"
891 " the start of migration");
892 return;
893 }
894
895 if (s->state == MIGRATION_STATUS_NONE) {
896 error_setg(errp, "Postcopy must be started after migration has been"
897 " started");
898 return;
899 }
900 /*
901 * we don't error if migration has finished since that would be racy
902 * with issuing this command.
903 */
904 atomic_set(&s->start_postcopy, true);
905 }
906
907 /* shared migration helpers */
908
909 void migrate_set_state(int *state, int old_state, int new_state)
910 {
911 if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
912 trace_migrate_set_state(new_state);
913 migrate_generate_event(new_state);
914 }
915 }
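/*
 * Note on the compare-and-swap above: the transition (and the MIGRATION
 * event it generates) only takes effect if *state still equals old_state;
 * if another thread has already moved the state on (e.g. a concurrent
 * cancel setting MIGRATION_STATUS_CANCELLING), the request is silently
 * dropped, which is why callers pass the exact state they expect to be
 * leaving.
 */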
916
917 static void migrate_fd_cleanup(void *opaque)
918 {
919 MigrationState *s = opaque;
920
921 qemu_bh_delete(s->cleanup_bh);
922 s->cleanup_bh = NULL;
923
924 flush_page_queue(s);
925
926 if (s->to_dst_file) {
927 trace_migrate_fd_cleanup();
928 qemu_mutex_unlock_iothread();
929 if (s->migration_thread_running) {
930 qemu_thread_join(&s->thread);
931 s->migration_thread_running = false;
932 }
933 qemu_mutex_lock_iothread();
934
935 migrate_compress_threads_join();
936 qemu_fclose(s->to_dst_file);
937 s->to_dst_file = NULL;
938 }
939
940 assert((s->state != MIGRATION_STATUS_ACTIVE) &&
941 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));
942
943 if (s->state == MIGRATION_STATUS_CANCELLING) {
944 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
945 MIGRATION_STATUS_CANCELLED);
946 }
947
948 notifier_list_notify(&migration_state_notifiers, s);
949 }
950
951 void migrate_fd_error(MigrationState *s, const Error *error)
952 {
953 trace_migrate_fd_error(error ? error_get_pretty(error) : "");
954 assert(s->to_dst_file == NULL);
955 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
956 MIGRATION_STATUS_FAILED);
957 if (!s->error) {
958 s->error = error_copy(error);
959 }
960 notifier_list_notify(&migration_state_notifiers, s);
961 }
962
963 static void migrate_fd_cancel(MigrationState *s)
964 {
965 int old_state ;
966 QEMUFile *f = migrate_get_current()->to_dst_file;
967 trace_migrate_fd_cancel();
968
969 if (s->rp_state.from_dst_file) {
970 /* Shut down the rp socket, causing the rp thread to exit */
971 qemu_file_shutdown(s->rp_state.from_dst_file);
972 }
973
974 do {
975 old_state = s->state;
976 if (!migration_is_setup_or_active(old_state)) {
977 break;
978 }
979 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
980 } while (s->state != MIGRATION_STATUS_CANCELLING);
981
982 /*
983 * If we're unlucky the migration code might be stuck somewhere in a
984 * send/write while the network has failed and is waiting to timeout;
985 * if we've got shutdown(2) available then we can force it to quit.
986 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
987 * called in a bh, so there is no race against this cancel.
988 */
989 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
990 qemu_file_shutdown(f);
991 }
992 }
993
994 void add_migration_state_change_notifier(Notifier *notify)
995 {
996 notifier_list_add(&migration_state_notifiers, notify);
997 }
998
999 void remove_migration_state_change_notifier(Notifier *notify)
1000 {
1001 notifier_remove(notify);
1002 }
1003
1004 bool migration_in_setup(MigrationState *s)
1005 {
1006 return s->state == MIGRATION_STATUS_SETUP;
1007 }
1008
1009 bool migration_has_finished(MigrationState *s)
1010 {
1011 return s->state == MIGRATION_STATUS_COMPLETED;
1012 }
1013
1014 bool migration_has_failed(MigrationState *s)
1015 {
1016 return (s->state == MIGRATION_STATUS_CANCELLED ||
1017 s->state == MIGRATION_STATUS_FAILED);
1018 }
1019
1020 bool migration_in_postcopy(MigrationState *s)
1021 {
1022 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
1023 }
1024
1025 bool migration_in_postcopy_after_devices(MigrationState *s)
1026 {
1027 return migration_in_postcopy(s) && s->postcopy_after_devices;
1028 }
1029
1030 MigrationState *migrate_init(const MigrationParams *params)
1031 {
1032 MigrationState *s = migrate_get_current();
1033
1034 /*
1035 * Reinitialise all migration state, except
1036 * parameters/capabilities that the user set, and
1037 * locks.
1038 */
1039 s->bytes_xfer = 0;
1040 s->xfer_limit = 0;
1041 s->cleanup_bh = 0;
1042 s->to_dst_file = NULL;
1043 s->state = MIGRATION_STATUS_NONE;
1044 s->params = *params;
1045 s->rp_state.from_dst_file = NULL;
1046 s->rp_state.error = false;
1047 s->mbps = 0.0;
1048 s->downtime = 0;
1049 s->expected_downtime = 0;
1050 s->dirty_pages_rate = 0;
1051 s->dirty_bytes_rate = 0;
1052 s->setup_time = 0;
1053 s->dirty_sync_count = 0;
1054 s->start_postcopy = false;
1055 s->postcopy_after_devices = false;
1056 s->postcopy_requests = 0;
1057 s->migration_thread_running = false;
1058 s->last_req_rb = NULL;
1059 error_free(s->error);
1060 s->error = NULL;
1061
1062 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
1063
1064 QSIMPLEQ_INIT(&s->src_page_requests);
1065
1066 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1067 return s;
1068 }
1069
1070 static GSList *migration_blockers;
1071
1072 void migrate_add_blocker(Error *reason)
1073 {
1074 migration_blockers = g_slist_prepend(migration_blockers, reason);
1075 }
1076
1077 void migrate_del_blocker(Error *reason)
1078 {
1079 migration_blockers = g_slist_remove(migration_blockers, reason);
1080 }
1081
1082 void qmp_migrate_incoming(const char *uri, Error **errp)
1083 {
1084 Error *local_err = NULL;
1085 static bool once = true;
1086
1087 if (!deferred_incoming) {
1088 error_setg(errp, "For use with '-incoming defer'");
1089 return;
1090 }
1091 if (!once) {
1092 error_setg(errp, "The incoming migration has already been started");
1093 }
1094
1095 qemu_start_incoming_migration(uri, &local_err);
1096
1097 if (local_err) {
1098 error_propagate(errp, local_err);
1099 return;
1100 }
1101
1102 once = false;
1103 }
1104
1105 bool migration_is_blocked(Error **errp)
1106 {
1107 if (qemu_savevm_state_blocked(errp)) {
1108 return true;
1109 }
1110
1111 if (migration_blockers) {
1112 *errp = error_copy(migration_blockers->data);
1113 return true;
1114 }
1115
1116 return false;
1117 }
1118
1119 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1120 bool has_inc, bool inc, bool has_detach, bool detach,
1121 Error **errp)
1122 {
1123 Error *local_err = NULL;
1124 MigrationState *s = migrate_get_current();
1125 MigrationParams params;
1126 const char *p;
1127
1128 params.blk = has_blk && blk;
1129 params.shared = has_inc && inc;
1130
1131 if (migration_is_setup_or_active(s->state) ||
1132 s->state == MIGRATION_STATUS_CANCELLING ||
1133 s->state == MIGRATION_STATUS_COLO) {
1134 error_setg(errp, QERR_MIGRATION_ACTIVE);
1135 return;
1136 }
1137 if (runstate_check(RUN_STATE_INMIGRATE)) {
1138 error_setg(errp, "Guest is waiting for an incoming migration");
1139 return;
1140 }
1141
1142 if (migration_is_blocked(errp)) {
1143 return;
1144 }
1145
1146 s = migrate_init(&params);
1147
1148 if (strstart(uri, "tcp:", &p)) {
1149 tcp_start_outgoing_migration(s, p, &local_err);
1150 #ifdef CONFIG_RDMA
1151 } else if (strstart(uri, "rdma:", &p)) {
1152 rdma_start_outgoing_migration(s, p, &local_err);
1153 #endif
1154 } else if (strstart(uri, "exec:", &p)) {
1155 exec_start_outgoing_migration(s, p, &local_err);
1156 } else if (strstart(uri, "unix:", &p)) {
1157 unix_start_outgoing_migration(s, p, &local_err);
1158 } else if (strstart(uri, "fd:", &p)) {
1159 fd_start_outgoing_migration(s, p, &local_err);
1160 } else {
1161 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1162 "a valid migration protocol");
1163 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1164 MIGRATION_STATUS_FAILED);
1165 return;
1166 }
1167
1168 if (local_err) {
1169 migrate_fd_error(s, local_err);
1170 error_propagate(errp, local_err);
1171 return;
1172 }
1173 }
1174
1175 void qmp_migrate_cancel(Error **errp)
1176 {
1177 migrate_fd_cancel(migrate_get_current());
1178 }
1179
1180 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1181 {
1182 MigrationState *s = migrate_get_current();
1183 int64_t new_size;
1184
1185 /* Check for truncation */
1186 if (value != (size_t)value) {
1187 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1188 "exceeding address space");
1189 return;
1190 }
1191
1192 /* Cache should not be larger than guest ram size */
1193 if (value > ram_bytes_total()) {
1194 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1195 "exceeds guest ram size ");
1196 return;
1197 }
1198
1199 new_size = xbzrle_cache_resize(value);
1200 if (new_size < 0) {
1201 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1202 "is smaller than page size");
1203 return;
1204 }
1205
1206 s->xbzrle_cache_size = new_size;
1207 }
1208
1209 int64_t qmp_query_migrate_cache_size(Error **errp)
1210 {
1211 return migrate_xbzrle_cache_size();
1212 }
1213
1214 void qmp_migrate_set_speed(int64_t value, Error **errp)
1215 {
1216 MigrationParameters p = {
1217 .has_max_bandwidth = true,
1218 .max_bandwidth = value,
1219 };
1220
1221 qmp_migrate_set_parameters(&p, errp);
1222 }
1223
1224 void qmp_migrate_set_downtime(double value, Error **errp)
1225 {
1226 value *= 1000; /* Convert to milliseconds */
1227 value = MAX(0, MIN(INT64_MAX, value));
1228
1229 MigrationParameters p = {
1230 .has_downtime_limit = true,
1231 .downtime_limit = value,
1232 };
1233
1234 qmp_migrate_set_parameters(&p, errp);
1235 }
1236
1237 bool migrate_postcopy_ram(void)
1238 {
1239 MigrationState *s;
1240
1241 s = migrate_get_current();
1242
1243 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1244 }
1245
1246 bool migrate_auto_converge(void)
1247 {
1248 MigrationState *s;
1249
1250 s = migrate_get_current();
1251
1252 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1253 }
1254
1255 bool migrate_zero_blocks(void)
1256 {
1257 MigrationState *s;
1258
1259 s = migrate_get_current();
1260
1261 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1262 }
1263
1264 bool migrate_use_compression(void)
1265 {
1266 MigrationState *s;
1267
1268 s = migrate_get_current();
1269
1270 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
1271 }
1272
1273 int migrate_compress_level(void)
1274 {
1275 MigrationState *s;
1276
1277 s = migrate_get_current();
1278
1279 return s->parameters.compress_level;
1280 }
1281
1282 int migrate_compress_threads(void)
1283 {
1284 MigrationState *s;
1285
1286 s = migrate_get_current();
1287
1288 return s->parameters.compress_threads;
1289 }
1290
1291 int migrate_decompress_threads(void)
1292 {
1293 MigrationState *s;
1294
1295 s = migrate_get_current();
1296
1297 return s->parameters.decompress_threads;
1298 }
1299
1300 bool migrate_use_events(void)
1301 {
1302 MigrationState *s;
1303
1304 s = migrate_get_current();
1305
1306 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1307 }
1308
1309 int migrate_use_xbzrle(void)
1310 {
1311 MigrationState *s;
1312
1313 s = migrate_get_current();
1314
1315 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1316 }
1317
1318 int64_t migrate_xbzrle_cache_size(void)
1319 {
1320 MigrationState *s;
1321
1322 s = migrate_get_current();
1323
1324 return s->xbzrle_cache_size;
1325 }
1326
1327 /* migration thread support */
1328 /*
1329 * Something bad happened to the RP stream, mark an error
1330 * The caller shall print or trace something to indicate why
1331 */
1332 static void mark_source_rp_bad(MigrationState *s)
1333 {
1334 s->rp_state.error = true;
1335 }
1336
1337 static struct rp_cmd_args {
1338 ssize_t len; /* -1 = variable */
1339 const char *name;
1340 } rp_cmd_args[] = {
1341 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1342 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1343 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
1344 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1345 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
1346 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1347 };
1348
1349 /*
1350 * Process a request for pages received on the return path,
1351 * We're allowed to send more than requested (e.g. to round to our page size)
1352 * and we don't need to send pages that have already been sent.
1353 */
1354 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1355 ram_addr_t start, size_t len)
1356 {
1357 long our_host_ps = getpagesize();
1358
1359 trace_migrate_handle_rp_req_pages(rbname, start, len);
1360
1361 /*
1362 * Since we currently insist on matching page sizes, just sanity check
1363 * we're being asked for whole host pages.
1364 */
1365 if (start & (our_host_ps-1) ||
1366 (len & (our_host_ps-1))) {
1367 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1368 " len: %zd", __func__, start, len);
1369 mark_source_rp_bad(ms);
1370 return;
1371 }
1372
1373 if (ram_save_queue_pages(ms, rbname, start, len)) {
1374 mark_source_rp_bad(ms);
1375 }
1376 }
1377
1378 /*
1379 * Handles messages sent on the return path towards the source VM
1380 *
1381 */
1382 static void *source_return_path_thread(void *opaque)
1383 {
1384 MigrationState *ms = opaque;
1385 QEMUFile *rp = ms->rp_state.from_dst_file;
1386 uint16_t header_len, header_type;
1387 uint8_t buf[512];
1388 uint32_t tmp32, sibling_error;
1389 ram_addr_t start = 0; /* =0 to silence warning */
1390 size_t len = 0, expected_len;
1391 int res;
1392
1393 trace_source_return_path_thread_entry();
1394 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1395 migration_is_setup_or_active(ms->state)) {
1396 trace_source_return_path_thread_loop_top();
1397 header_type = qemu_get_be16(rp);
1398 header_len = qemu_get_be16(rp);
1399
1400 if (header_type >= MIG_RP_MSG_MAX ||
1401 header_type == MIG_RP_MSG_INVALID) {
1402 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1403 header_type, header_len);
1404 mark_source_rp_bad(ms);
1405 goto out;
1406 }
1407
1408 if ((rp_cmd_args[header_type].len != -1 &&
1409 header_len != rp_cmd_args[header_type].len) ||
1410 header_len > sizeof(buf)) {
1411 error_report("RP: Received '%s' message (0x%04x) with"
1412 "incorrect length %d expecting %zu",
1413 rp_cmd_args[header_type].name, header_type, header_len,
1414 (size_t)rp_cmd_args[header_type].len);
1415 mark_source_rp_bad(ms);
1416 goto out;
1417 }
1418
1419 /* We know we've got a valid header by this point */
1420 res = qemu_get_buffer(rp, buf, header_len);
1421 if (res != header_len) {
1422 error_report("RP: Failed reading data for message 0x%04x"
1423 " read %d expected %d",
1424 header_type, res, header_len);
1425 mark_source_rp_bad(ms);
1426 goto out;
1427 }
1428
1429 /* OK, we have the message and the data */
1430 switch (header_type) {
1431 case MIG_RP_MSG_SHUT:
1432 sibling_error = ldl_be_p(buf);
1433 trace_source_return_path_thread_shut(sibling_error);
1434 if (sibling_error) {
1435 error_report("RP: Sibling indicated error %d", sibling_error);
1436 mark_source_rp_bad(ms);
1437 }
1438 /*
1439 * We'll let the main thread deal with closing the RP;
1440 * we could do a shutdown(2) on it, but we're the only user
1441 * anyway, so there's nothing gained.
1442 */
1443 goto out;
1444
1445 case MIG_RP_MSG_PONG:
1446 tmp32 = ldl_be_p(buf);
1447 trace_source_return_path_thread_pong(tmp32);
1448 break;
1449
1450 case MIG_RP_MSG_REQ_PAGES:
1451 start = ldq_be_p(buf);
1452 len = ldl_be_p(buf + 8);
1453 migrate_handle_rp_req_pages(ms, NULL, start, len);
1454 break;
1455
1456 case MIG_RP_MSG_REQ_PAGES_ID:
1457 expected_len = 12 + 1; /* header + termination */
1458
1459 if (header_len >= expected_len) {
1460 start = ldq_be_p(buf);
1461 len = ldl_be_p(buf + 8);
1462 /* Now we expect an idstr */
1463 tmp32 = buf[12]; /* Length of the following idstr */
1464 buf[13 + tmp32] = '\0';
1465 expected_len += tmp32;
1466 }
1467 if (header_len != expected_len) {
1468 error_report("RP: Req_Page_id with length %d expecting %zd",
1469 header_len, expected_len);
1470 mark_source_rp_bad(ms);
1471 goto out;
1472 }
1473 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1474 break;
1475
1476 default:
1477 break;
1478 }
1479 }
1480 if (qemu_file_get_error(rp)) {
1481 trace_source_return_path_thread_bad_end();
1482 mark_source_rp_bad(ms);
1483 }
1484
1485 trace_source_return_path_thread_end();
1486 out:
1487 ms->rp_state.from_dst_file = NULL;
1488 qemu_fclose(rp);
1489 return NULL;
1490 }
1491
1492 static int open_return_path_on_source(MigrationState *ms)
1493 {
1494
1495 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
1496 if (!ms->rp_state.from_dst_file) {
1497 return -1;
1498 }
1499
1500 trace_open_return_path_on_source();
1501 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1502 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1503
1504 trace_open_return_path_on_source_continue();
1505
1506 return 0;
1507 }
1508
1509 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1510 static int await_return_path_close_on_source(MigrationState *ms)
1511 {
1512 /*
1513 * If this is a normal exit then the destination will send a SHUT and the
1514 * rp_thread will exit, however if there's an error we need to cause
1515 * it to exit.
1516 */
1517 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
1518 /*
1519 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1520 * waiting for the destination.
1521 */
1522 qemu_file_shutdown(ms->rp_state.from_dst_file);
1523 mark_source_rp_bad(ms);
1524 }
1525 trace_await_return_path_close_on_source_joining();
1526 qemu_thread_join(&ms->rp_state.rp_thread);
1527 trace_await_return_path_close_on_source_close();
1528 return ms->rp_state.error;
1529 }
1530
1531 /*
1532 * Switch from normal iteration to postcopy
1533 * Returns non-0 on error
1534 */
1535 static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1536 {
1537 int ret;
1538 QIOChannelBuffer *bioc;
1539 QEMUFile *fb;
1540 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1541 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
1542 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1543
1544 trace_postcopy_start();
1545 qemu_mutex_lock_iothread();
1546 trace_postcopy_start_set_run();
1547
1548 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1549 *old_vm_running = runstate_is_running();
1550 global_state_store();
1551 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1552 if (ret < 0) {
1553 goto fail;
1554 }
1555
1556 ret = bdrv_inactivate_all();
1557 if (ret < 0) {
1558 goto fail;
1559 }
1560
1561 /*
1562 * Cause any non-postcopiable, but iterative devices to
1563 * send out their final data.
1564 */
1565 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
1566
1567 /*
1568 * In the "finish migrate" state and with the io-lock held, everything
1569 * should be quiet, but we've potentially still got dirty pages and we
1570 * need to tell the destination to throw away any pages it has already
1571 * received that are dirty
1572 */
1573 if (ram_postcopy_send_discard_bitmap(ms)) {
1574 error_report("postcopy send discard bitmap failed");
1575 goto fail;
1576 }
1577
1578 /*
1579 * send rest of state - note things that are doing postcopy
1580 * will notice we're in POSTCOPY_ACTIVE and not actually
1581 * wrap their state up here
1582 */
1583 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
1584 /* Ping just for debugging, helps line traces up */
1585 qemu_savevm_send_ping(ms->to_dst_file, 2);
1586
1587 /*
1588 * While loading the device state we may trigger page transfer
1589 * requests and the fd must be free to process those, and thus
1590 * the destination must read the whole device state off the fd before
1591 * it starts processing it. Unfortunately the ad-hoc migration format
1592 * doesn't allow the destination to know the size to read without fully
1593 * parsing it through each device's load-state code (especially the open
1594 * coded devices that use get/put).
1595 * So we wrap the device state up in a package with a length at the start;
1596 * to do this we use a qemu_buf to hold the whole of the device state.
1597 */
1598 bioc = qio_channel_buffer_new(4096);
1599 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
1600 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
1601 object_unref(OBJECT(bioc));
1602
1603 /*
1604 * Make sure the receiver can get incoming pages before we send the rest
1605 * of the state
1606 */
1607 qemu_savevm_send_postcopy_listen(fb);
1608
1609 qemu_savevm_state_complete_precopy(fb, false);
1610 qemu_savevm_send_ping(fb, 3);
1611
1612 qemu_savevm_send_postcopy_run(fb);
1613
1614 /* <><> end of stuff going into the package */
1615
1616 /* Now send that blob */
1617 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
1618 goto fail_closefb;
1619 }
1620 qemu_fclose(fb);
1621
1622 /* Send a notify to give a chance for anything that needs to happen
1623 * at the transition to postcopy and after the device state; in particular
1624 * spice needs to trigger a transition now
1625 */
1626 ms->postcopy_after_devices = true;
1627 notifier_list_notify(&migration_state_notifiers, ms);
1628
1629 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1630
1631 qemu_mutex_unlock_iothread();
1632
1633 /*
1634 * Although this ping is just for debug, it could potentially be
1635 * used for getting a better measurement of downtime at the source.
1636 */
1637 qemu_savevm_send_ping(ms->to_dst_file, 4);
1638
1639 ret = qemu_file_get_error(ms->to_dst_file);
1640 if (ret) {
1641 error_report("postcopy_start: Migration stream errored");
1642 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1643 MIGRATION_STATUS_FAILED);
1644 }
1645
1646 return ret;
1647
1648 fail_closefb:
1649 qemu_fclose(fb);
1650 fail:
1651 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1652 MIGRATION_STATUS_FAILED);
1653 qemu_mutex_unlock_iothread();
1654 return -1;
1655 }
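/*
 * Summary of the stream ordering implemented above, as the destination will
 * see it: the final dirty-RAM / iterative device data, then one packaged
 * blob containing the LISTEN command, the remaining (non-iterative) device
 * state and the RUN command, and only after that the on-demand page
 * transfers.  The packaging lets the destination read the whole device
 * state off the channel before it starts processing it.
 */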
1656
1657 /**
1658 * migration_completion: Used by migration_thread when there's not much left.
1659 * The caller 'breaks' the loop when this returns.
1660 *
1661 * @s: Current migration state
1662 * @current_active_state: The migration state we expect to be in
1663 * @*old_vm_running: Pointer to old_vm_running flag
1664 * @*start_time: Pointer to time to update
1665 */
1666 static void migration_completion(MigrationState *s, int current_active_state,
1667 bool *old_vm_running,
1668 int64_t *start_time)
1669 {
1670 int ret;
1671
1672 if (s->state == MIGRATION_STATUS_ACTIVE) {
1673 qemu_mutex_lock_iothread();
1674 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1675 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1676 *old_vm_running = runstate_is_running();
1677 ret = global_state_store();
1678
1679 if (!ret) {
1680 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1681 /*
1682 * Don't mark the image with BDRV_O_INACTIVE flag if
1683 * we will go into COLO stage later.
1684 */
1685 if (ret >= 0 && !migrate_colo_enabled()) {
1686 ret = bdrv_inactivate_all();
1687 }
1688 if (ret >= 0) {
1689 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1690 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1691 }
1692 }
1693 qemu_mutex_unlock_iothread();
1694
1695 if (ret < 0) {
1696 goto fail;
1697 }
1698 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1699 trace_migration_completion_postcopy_end();
1700
1701 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1702 trace_migration_completion_postcopy_end_after_complete();
1703 }
1704
1705 /*
1706 * If rp was opened we must clean up the thread before
1707 * cleaning everything else up (since if there are no failures
1708 * it will wait for the destination to send its status in
1709 * a SHUT command).
1710 * Postcopy opens rp if enabled (even if it's not activated)
1711 */
1712 if (migrate_postcopy_ram()) {
1713 int rp_error;
1714 trace_migration_completion_postcopy_end_before_rp();
1715 rp_error = await_return_path_close_on_source(s);
1716 trace_migration_completion_postcopy_end_after_rp(rp_error);
1717 if (rp_error) {
1718 goto fail_invalidate;
1719 }
1720 }
1721
1722 if (qemu_file_get_error(s->to_dst_file)) {
1723 trace_migration_completion_file_err();
1724 goto fail_invalidate;
1725 }
1726
1727 if (!migrate_colo_enabled()) {
1728 migrate_set_state(&s->state, current_active_state,
1729 MIGRATION_STATUS_COMPLETED);
1730 }
1731
1732 return;
1733
1734 fail_invalidate:
1735 /* If not doing postcopy, vm_start() will be called: let's regain
1736 * control on images.
1737 */
1738 if (s->state == MIGRATION_STATUS_ACTIVE) {
1739 Error *local_err = NULL;
1740
1741 bdrv_invalidate_cache_all(&local_err);
1742 if (local_err) {
1743 error_report_err(local_err);
1744 }
1745 }
1746
1747 fail:
1748 migrate_set_state(&s->state, current_active_state,
1749 MIGRATION_STATUS_FAILED);
1750 }
1751
1752 bool migrate_colo_enabled(void)
1753 {
1754 MigrationState *s = migrate_get_current();
1755 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
1756 }
1757
1758 /*
1759 * Master migration thread on the source VM.
1760 * It drives the migration and pumps the data down the outgoing channel.
1761 */
1762 static void *migration_thread(void *opaque)
1763 {
1764 MigrationState *s = opaque;
1765 /* Used by the bandwidth calcs, updated later */
1766 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1767 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1768 int64_t initial_bytes = 0;
1769 int64_t max_size = 0;
1770 int64_t start_time = initial_time;
1771 int64_t end_time;
1772 bool old_vm_running = false;
1773 bool entered_postcopy = false;
1774 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1775 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1776 bool enable_colo = migrate_colo_enabled();
1777
1778 rcu_register_thread();
1779
1780 qemu_savevm_state_header(s->to_dst_file);
1781
1782 if (migrate_postcopy_ram()) {
1783 /* Now tell the dest that it should open its end so it can reply */
1784 qemu_savevm_send_open_return_path(s->to_dst_file);
1785
1786 /* And do a ping that will make stuff easier to debug */
1787 qemu_savevm_send_ping(s->to_dst_file, 1);
1788
1789 /*
1790 * Tell the destination that we *might* want to do postcopy later;
1791 * if the other end can't do postcopy it should fail now, nice and
1792 * early.
1793 */
1794 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1795 }
1796
1797 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1798
1799 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1800 current_active_state = MIGRATION_STATUS_ACTIVE;
1801 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1802 MIGRATION_STATUS_ACTIVE);
1803
1804 trace_migration_thread_setup_complete();
1805
1806 while (s->state == MIGRATION_STATUS_ACTIVE ||
1807 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1808 int64_t current_time;
1809 uint64_t pending_size;
1810
1811 if (!qemu_file_rate_limit(s->to_dst_file)) {
1812 uint64_t pend_post, pend_nonpost;
1813
1814 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1815 &pend_post);
1816 pending_size = pend_nonpost + pend_post;
1817 trace_migrate_pending(pending_size, max_size,
1818 pend_post, pend_nonpost);
1819 if (pending_size && pending_size >= max_size) {
1820 /* Still a significant amount to transfer */
1821
1822 if (migrate_postcopy_ram() &&
1823 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1824 pend_nonpost <= max_size &&
1825 atomic_read(&s->start_postcopy)) {
1826
1827 if (!postcopy_start(s, &old_vm_running)) {
1828 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1829 entered_postcopy = true;
1830 }
1831
1832 continue;
1833 }
1834 /* Just another iteration step */
1835 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1836 } else {
1837 trace_migration_thread_low_pending(pending_size);
1838 migration_completion(s, current_active_state,
1839 &old_vm_running, &start_time);
1840 break;
1841 }
1842 }
1843
1844 if (qemu_file_get_error(s->to_dst_file)) {
1845 migrate_set_state(&s->state, current_active_state,
1846 MIGRATION_STATUS_FAILED);
1847 trace_migration_thread_file_err();
1848 break;
1849 }
1850 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1851 if (current_time >= initial_time + BUFFER_DELAY) {
1852 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1853 initial_bytes;
1854 uint64_t time_spent = current_time - initial_time;
1855 double bandwidth = (double)transferred_bytes / time_spent;
1856 max_size = bandwidth * s->parameters.downtime_limit;
1857
1858 s->mbps = (((double) transferred_bytes * 8.0) /
1859 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1860
1861 trace_migrate_transferred(transferred_bytes, time_spent,
1862 bandwidth, max_size);
1863 /* If we haven't sent anything, we don't want to recalculate;
1864 10000 is a small enough number for our purposes */
1865 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
1866 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
1867 }
1868
1869 qemu_file_reset_rate_limit(s->to_dst_file);
1870 initial_time = current_time;
1871 initial_bytes = qemu_ftell(s->to_dst_file);
1872 }
1873 if (qemu_file_rate_limit(s->to_dst_file)) {
1874 /* usleep expects microseconds */
1875 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
1876 }
1877 }
1878
1879 trace_migration_thread_after_loop();
1880 /* If we enabled cpu throttling for auto-converge, turn it off. */
1881 cpu_throttle_stop();
1882 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1883
1884 qemu_mutex_lock_iothread();
1885 /*
1886 * The resources that have been allocated by migration will be reused in
1887 * the COLO process, so don't release them.
1888 */
1889 if (!enable_colo) {
1890 qemu_savevm_state_cleanup();
1891 }
1892 if (s->state == MIGRATION_STATUS_COMPLETED) {
1893 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
1894 s->total_time = end_time - s->total_time;
1895 if (!entered_postcopy) {
1896 s->downtime = end_time - start_time;
1897 }
1898 if (s->total_time) {
1899 s->mbps = (((double) transferred_bytes * 8.0) /
1900 ((double) s->total_time)) / 1000;
1901 }
1902 runstate_set(RUN_STATE_POSTMIGRATE);
1903 } else {
1904 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
1905 migrate_start_colo_process(s);
1906 qemu_savevm_state_cleanup();
1907 /*
1908 * Fixme: we will run the VM in COLO no matter what its old running state
1909 * was; after exiting COLO, it will keep running.
1910 */
1911 old_vm_running = true;
1912 }
1913 if (old_vm_running && !entered_postcopy) {
1914 vm_start();
1915 } else {
1916 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
1917 runstate_set(RUN_STATE_POSTMIGRATE);
1918 }
1919 }
1920 }
1921 qemu_bh_schedule(s->cleanup_bh);
1922 qemu_mutex_unlock_iothread();
1923
1924 rcu_unregister_thread();
1925 return NULL;
1926 }
1927
1928 void migrate_fd_connect(MigrationState *s)
1929 {
1930 s->expected_downtime = s->parameters.downtime_limit;
1931 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
1932
1933 qemu_file_set_blocking(s->to_dst_file, true);
1934 qemu_file_set_rate_limit(s->to_dst_file,
1935 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
1936
1937 /* Notify before starting migration thread */
1938 notifier_list_notify(&migration_state_notifiers, s);
1939
1940 /*
1941 * Open the return path; currently for postcopy but other things might
1942 * also want it.
1943 */
1944 if (migrate_postcopy_ram()) {
1945 if (open_return_path_on_source(s)) {
1946 error_report("Unable to open return-path for postcopy");
1947 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1948 MIGRATION_STATUS_FAILED);
1949 migrate_fd_cleanup(s);
1950 return;
1951 }
1952 }
1953
1954 migrate_compress_threads_create();
1955 qemu_thread_create(&s->thread, "migration", migration_thread, s,
1956 QEMU_THREAD_JOINABLE);
1957 s->migration_thread_running = true;
1958 }
1959
1960 PostcopyState postcopy_state_get(void)
1961 {
1962 return atomic_mb_read(&incoming_postcopy_state);
1963 }
1964
1965 /* Set the state and return the old state */
1966 PostcopyState postcopy_state_set(PostcopyState new_state)
1967 {
1968 return atomic_xchg(&incoming_postcopy_state, new_state);
1969 }
1970