/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"

#define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY 100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
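
/*
 * The rate limit is enforced per BUFFER_DELAY chunk: the per-chunk byte
 * budget is max_bandwidth / XFER_LIMIT_RATIO (see the
 * qemu_file_set_rate_limit() call in qmp_migrate_set_parameters()),
 * i.e. ten 100ms chunks per second.  With the default MAX_THROTTLE of
 * 32 MiB/s that is roughly 3.2 MiB per chunk.
 */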

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end, as the MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need dynamic
   creation of migration state. */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters = {
            .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
            .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
            .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
            .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
            .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
            .max_bandwidth = MAX_THROTTLE,
            .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
            .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
        },
    };

    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        current_migration.parameters.tls_creds = g_strdup("");
        current_migration.parameters.tls_hostname = g_strdup("");
        once = true;
    }
    return &current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero everything first; the explicit assignment then documents
         * the starting state.  (MIGRATION_STATUS_NONE is 0, so doing the
         * memset after the assignment would make it a dead store.) */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        QLIST_INIT(&mis_current.loadvm_handlers);
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    qemu_event_destroy(&mis->main_thread_load_event);
    loadvm_free_handlers(mis);
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

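/*
 * Note: VMSTATE_BUFFER serializes the whole fixed-size runstate[] array
 * (NUL padded), not just the strlen() bytes recorded in 'size', so the
 * stream cost is constant regardless of the state name.
 */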
static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use this regardless of whether we actually receive it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *         as the last request (a name must have been given previously)
 * start: Address offset within the RB
 * len: Length in bytes required - must be a multiple of pagesize
 */
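/*
 * Wire layout of the request payload (it must match the parsing in
 * source_return_path_thread() below):
 *   bytes  0..7   start offset, big-endian 64 bit
 *   bytes  8..11  length, big-endian 32 bit
 *   byte  12      rbname length (REQ_PAGES_ID only)
 *   bytes 13..    rbname, not NUL terminated (REQ_PAGES_ID only)
 */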
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), length byte,
                                   rbname up to 255 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

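/*
 * Accepted URI forms, matching the dispatch below: "defer", "tcp:...",
 * "rdma:..." (only when built with CONFIG_RDMA), "exec:...", "unix:..."
 * and "fd:...".
 */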
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* If we get an error here, just don't restart the VM yet. */
    blk_resume_after_migration(&local_err);
    if (local_err) {
        error_free(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If the global state section was not received, or we are in a
       running state, we need to obey autostart.  Any other state is
       set with runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* We received the COLO info, so we now know whether we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
                           colo_process_incoming_thread, mis,
                           QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait for the COLO incoming checkpoint thread to exit before
         * freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

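/*
 * The incoming stream is consumed by a coroutine on the main loop, so
 * the channel below is switched to non-blocking mode: short reads make
 * the coroutine yield instead of stalling the event loop.
 */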
void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}


void migration_channel_process_incoming(MigrationState *s,
                                        QIOChannel *ioc)
{
    trace_migration_set_incoming_channel(
        ioc, object_get_typename(OBJECT(ioc)));

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_process_incoming(s, ioc, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_input(ioc);
        migration_fd_process_incoming(f);
    }
}


void migration_channel_connect(MigrationState *s,
                               QIOChannel *ioc,
                               const char *hostname)
{
    trace_migration_set_outgoing_channel(
        ioc, object_get_typename(OBJECT(ioc)), hostname);

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_connect(s, ioc, hostname, &local_err);
        if (local_err) {
            migrate_fd_error(s, local_err);
            error_free(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_output(ioc);

        s->to_dst_file = f;

        migrate_fd_connect(s);
    }
}


/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
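/*
 * Framing on the wire: 16-bit message type, then 16-bit payload length,
 * then the payload, all big-endian; see the matching reads in
 * source_return_path_thread().
 */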
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_bytes_transferred();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = dup_mig_pages_transferred();
    info->ram->skipped = skipped_mig_pages_transferred();
    info->ram->normal = norm_mig_pages_transferred();
    info->ram->normal_bytes = norm_mig_bytes_transferred();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = s->dirty_sync_count;
    info->ram->postcopy_requests = s->postcopy_requests;

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                           " configure with --enable-colo option in order to"
                           " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                   " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_checkpoint_delay",
                   "is invalid, it should be positive");
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                   " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                   " started");
        return;
    }
    /*
     * We don't error if migration has finished, since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

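/*
 * The transition only happens if the current state is exactly
 * old_state; a concurrent transition (e.g. a cancel) simply wins the
 * cmpxchg race and this call becomes a no-op.
 */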
void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* Shut down the return-path socket, which causes the rp thread
         * to exit */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy(s) && s->postcopy_after_devices;
}

bool migration_is_idle(MigrationState *s)
{
    if (!s) {
        s = migrate_get_current();
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->dirty_pages_rate = 0;
    s->dirty_bytes_rate = 0;
    s->setup_time = 0;
    s->dirty_sync_count = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->postcopy_requests = 0;
    s->migration_thread_running = false;
    s->last_req_rb = NULL;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

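/*
 * Typical caller pattern (a sketch; the device name is hypothetical):
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "Device xyz does not support migration");
 *     if (migrate_add_blocker(blocker, &local_err) < 0) {
 *         error_free(blocker);
 *         ...
 *     }
 *
 * The same Error pointer is later handed to migrate_del_blocker().
 */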
int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                      "(--only-migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle(NULL)) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                  "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

int check_migratable(Object *obj, Error **err)
{
    DeviceClass *dc = DEVICE_GET_CLASS(obj);
    if (only_migratable && dc->vmsd) {
        if (dc->vmsd->unmigratable) {
            error_setg(err, "Device %s is not migratable, but "
                       "--only-migratable was specified",
                       object_get_typename(obj));
            return -1;
        }
    }

    return 0;
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]      = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]         = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]         = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]    = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]          = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type,
                         header_len, (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
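/*
 * Sequence: stop the VM, inactivate block devices, tell the destination
 * to discard pages that have become dirty again, then wrap the remaining
 * device state (LISTEN command, precopy completion, RUN command) into a
 * single length-prefixed package so the destination can buffer it whole
 * before processing it.
 */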
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * In the FINISH_MIGRATE state, with the io-lock held, everything
     * should be quiet, but we've potentially still got dirty pages and
     * we need to tell the destination to throw away any pages it has
     * already received that are now dirty.
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * Send the rest of the state - note that things doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use an in-memory buffer channel to hold the whole of
     * the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* End of the data going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

1801 /**
1802 * migration_completion: Used by migration_thread when there's not much left.
1803 * The caller 'breaks' the loop when this returns.
1804 *
1805 * @s: Current migration state
1806 * @current_active_state: The migration state we expect to be in
1807 * @*old_vm_running: Pointer to old_vm_running flag
1808 * @*start_time: Pointer to time to update
1809 */
1810 static void migration_completion(MigrationState *s, int current_active_state,
1811 bool *old_vm_running,
1812 int64_t *start_time)
1813 {
1814 int ret;
1815
1816 if (s->state == MIGRATION_STATUS_ACTIVE) {
1817 qemu_mutex_lock_iothread();
1818 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1819 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1820 *old_vm_running = runstate_is_running();
1821 ret = global_state_store();
1822
1823 if (!ret) {
1824 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1825 /*
1826 * Don't mark the image with BDRV_O_INACTIVE flag if
1827 * we will go into COLO stage later.
1828 */
1829 if (ret >= 0 && !migrate_colo_enabled()) {
1830 ret = bdrv_inactivate_all();
1831 }
1832 if (ret >= 0) {
1833 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1834 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1835 s->block_inactive = true;
1836 }
1837 }
1838 qemu_mutex_unlock_iothread();
1839
1840 if (ret < 0) {
1841 goto fail;
1842 }
1843 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1844 trace_migration_completion_postcopy_end();
1845
1846 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1847 trace_migration_completion_postcopy_end_after_complete();
1848 }
1849
1850 /*
1851 * If rp was opened we must clean up the thread before
1852 * cleaning everything else up (since if there are no failures
1853 * it will wait for the destination to send it's status in
1854 * a SHUT command).
1855 * Postcopy opens rp if enabled (even if it's not avtivated)
1856 */
1857 if (migrate_postcopy_ram()) {
1858 int rp_error;
1859 trace_migration_completion_postcopy_end_before_rp();
1860 rp_error = await_return_path_close_on_source(s);
1861 trace_migration_completion_postcopy_end_after_rp(rp_error);
1862 if (rp_error) {
1863 goto fail_invalidate;
1864 }
1865 }
1866
1867 if (qemu_file_get_error(s->to_dst_file)) {
1868 trace_migration_completion_file_err();
1869 goto fail_invalidate;
1870 }
1871
1872 if (!migrate_colo_enabled()) {
1873 migrate_set_state(&s->state, current_active_state,
1874 MIGRATION_STATUS_COMPLETED);
1875 }
1876
1877 return;
1878
1879 fail_invalidate:
1880 /* If not doing postcopy, vm_start() will be called: let's regain
1881 * control on images.
1882 */
1883 if (s->state == MIGRATION_STATUS_ACTIVE) {
1884 Error *local_err = NULL;
1885
1886 qemu_mutex_lock_iothread();
1887 bdrv_invalidate_cache_all(&local_err);
1888 if (local_err) {
1889 error_report_err(local_err);
1890 } else {
1891 s->block_inactive = false;
1892 }
1893 qemu_mutex_unlock_iothread();
1894 }
1895
1896 fail:
1897 migrate_set_state(&s->state, current_active_state,
1898 MIGRATION_STATUS_FAILED);
1899 }
1900
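/* Whether the x-colo migration capability has been enabled on the source */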
1901 bool migrate_colo_enabled(void)
1902 {
1903 MigrationState *s = migrate_get_current();
1904 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
1905 }
1906
1907 /*
1908 * Master migration thread on the source VM.
1909 * It drives the migration and pumps the data down the outgoing channel.
1910 */
1911 static void *migration_thread(void *opaque)
1912 {
1913 MigrationState *s = opaque;
1914 /* Used by the bandwidth calcs, updated later */
1915 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1916 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1917 int64_t initial_bytes = 0;
1918 int64_t max_size = 0;
1919 int64_t start_time = initial_time;
1920 int64_t end_time;
1921 bool old_vm_running = false;
1922 bool entered_postcopy = false;
1923 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1924 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1925 bool enable_colo = migrate_colo_enabled();
1926
1927 rcu_register_thread();
1928
1929 qemu_savevm_state_header(s->to_dst_file);
1930
1931 if (migrate_postcopy_ram()) {
1932 /* Now tell the dest that it should open its end so it can reply */
1933 qemu_savevm_send_open_return_path(s->to_dst_file);
1934
1935 /* And do a ping that will make stuff easier to debug */
1936 qemu_savevm_send_ping(s->to_dst_file, 1);
1937
1938 /*
1939 * Tell the destination that we *might* want to do postcopy later;
1940 * if the other end can't do postcopy it should fail now, nice and
1941 * early.
1942 */
1943 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1944 }
1945
1946 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1947
1948 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1949 current_active_state = MIGRATION_STATUS_ACTIVE;
1950 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1951 MIGRATION_STATUS_ACTIVE);
1952
1953 trace_migration_thread_setup_complete();
1954
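/*
 * Main transfer loop: keep iterating the savevm state while the
 * migration is ACTIVE or POSTCOPY_ACTIVE, switching to postcopy
 * when requested and completing once the remaining dirty data is
 * small enough to fit into the configured downtime.
 */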
1955 while (s->state == MIGRATION_STATUS_ACTIVE ||
1956 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1957 int64_t current_time;
1958 uint64_t pending_size;
1959
1960 if (!qemu_file_rate_limit(s->to_dst_file)) {
1961 uint64_t pend_post, pend_nonpost;
1962
1963 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1964 &pend_post);
1965 pending_size = pend_nonpost + pend_post;
1966 trace_migrate_pending(pending_size, max_size,
1967 pend_post, pend_nonpost);
1968 if (pending_size && pending_size >= max_size) {
1969 /* Still a significant amount to transfer */
1970
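/*
 * Switch over to postcopy only if the user asked for it
 * (start_postcopy), we are not already in postcopy, and the
 * non-postcopiable remainder can be sent within the current
 * downtime budget (max_size).
 */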
1971 if (migrate_postcopy_ram() &&
1972 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1973 pend_nonpost <= max_size &&
1974 atomic_read(&s->start_postcopy)) {
1975
1976 if (!postcopy_start(s, &old_vm_running)) {
1977 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1978 entered_postcopy = true;
1979 }
1980
1981 continue;
1982 }
1983 /* Just another iteration step */
1984 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1985 } else {
1986 trace_migration_thread_low_pending(pending_size);
1987 migration_completion(s, current_active_state,
1988 &old_vm_running, &start_time);
1989 break;
1990 }
1991 }
1992
1993 if (qemu_file_get_error(s->to_dst_file)) {
1994 migrate_set_state(&s->state, current_active_state,
1995 MIGRATION_STATUS_FAILED);
1996 trace_migration_thread_file_err();
1997 break;
1998 }
1999 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
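/*
 * Once per BUFFER_DELAY window, recompute the observed bandwidth
 * in bytes per millisecond and, from it, the amount of data that
 * can still be outstanding while meeting the downtime limit:
 * max_size = bandwidth * downtime_limit. s->mbps is the same
 * measurement expressed in megabits per second.
 */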
2000 if (current_time >= initial_time + BUFFER_DELAY) {
2001 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
2002 initial_bytes;
2003 uint64_t time_spent = current_time - initial_time;
2004 double bandwidth = (double)transferred_bytes / time_spent;
2005 max_size = bandwidth * s->parameters.downtime_limit;
2006
2007 s->mbps = (((double) transferred_bytes * 8.0) /
2008 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
2009
2010 trace_migrate_transferred(transferred_bytes, time_spent,
2011 bandwidth, max_size);
2012 /* If we haven't sent much of anything, don't recalculate the expected
2013 downtime; 10000 bytes is a small enough threshold for our purposes */
2014 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
2015 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
2016 }
2017
2018 qemu_file_reset_rate_limit(s->to_dst_file);
2019 initial_time = current_time;
2020 initial_bytes = qemu_ftell(s->to_dst_file);
2021 }
2022 if (qemu_file_rate_limit(s->to_dst_file)) {
2023 /* Sleep for the rest of the BUFFER_DELAY window; g_usleep expects microseconds */
2024 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
2025 }
2026 }
2027
2028 trace_migration_thread_after_loop();
2029 /* If we enabled cpu throttling for auto-converge, turn it off. */
2030 cpu_throttle_stop();
2031 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2032
2033 qemu_mutex_lock_iothread();
2034 /*
2035 * The resources allocated by migration will be reused in the COLO
2036 * process, so don't release them.
2037 */
2038 if (!enable_colo) {
2039 qemu_savevm_state_cleanup();
2040 }
2041 if (s->state == MIGRATION_STATUS_COMPLETED) {
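/*
 * s->total_time held the migration start timestamp until now;
 * convert it to the elapsed duration and derive the average
 * throughput in megabits per second from it.
 */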
2042 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
2043 s->total_time = end_time - s->total_time;
2044 if (!entered_postcopy) {
2045 s->downtime = end_time - start_time;
2046 }
2047 if (s->total_time) {
2048 s->mbps = (((double) transferred_bytes * 8.0) /
2049 ((double) s->total_time)) / 1000;
2050 }
2051 runstate_set(RUN_STATE_POSTMIGRATE);
2052 } else {
2053 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
2054 migrate_start_colo_process(s);
2055 qemu_savevm_state_cleanup();
2056 /*
2057 * Fixme: we will run the VM in COLO no matter what its old running
2058 * state was. After exiting COLO, we will keep the VM running.
2059 */
2060 old_vm_running = true;
2061 }
2062 if (old_vm_running && !entered_postcopy) {
2063 vm_start();
2064 } else {
2065 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2066 runstate_set(RUN_STATE_POSTMIGRATE);
2067 }
2068 }
2069 }
2070 qemu_bh_schedule(s->cleanup_bh);
2071 qemu_mutex_unlock_iothread();
2072
2073 rcu_unregister_thread();
2074 return NULL;
2075 }
2076
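/*
 * Start an outgoing migration on an already-connected channel:
 * set up the cleanup bottom half and the bandwidth limit, notify
 * state listeners, open the return path when postcopy is enabled
 * and spawn the migration thread.
 */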
2077 void migrate_fd_connect(MigrationState *s)
2078 {
2079 s->expected_downtime = s->parameters.downtime_limit;
2080 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
2081
2082 qemu_file_set_blocking(s->to_dst_file, true);
2083 qemu_file_set_rate_limit(s->to_dst_file,
2084 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
2085
2086 /* Notify before starting migration thread */
2087 notifier_list_notify(&migration_state_notifiers, s);
2088
2089 /*
2090 * Open the return path; currently for postcopy but other things might
2091 * also want it.
2092 */
2093 if (migrate_postcopy_ram()) {
2094 if (open_return_path_on_source(s)) {
2095 error_report("Unable to open return-path for postcopy");
2096 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2097 MIGRATION_STATUS_FAILED);
2098 migrate_fd_cleanup(s);
2099 return;
2100 }
2101 }
2102
2103 migrate_compress_threads_create();
2104 qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
2105 QEMU_THREAD_JOINABLE);
2106 s->migration_thread_running = true;
2107 }
2108
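/* Read the current incoming postcopy state (atomic read with memory barrier) */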
2109 PostcopyState postcopy_state_get(void)
2110 {
2111 return atomic_mb_read(&incoming_postcopy_state);
2112 }
2113
2114 /* Set the state and return the old state */
2115 PostcopyState postcopy_state_set(PostcopyState new_state)
2116 {
2117 return atomic_xchg(&incoming_postcopy_state, new_state);
2118 }
2119