1 /*
2 * QEMU live migration
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/migration.h"
21 #include "migration/qemu-file.h"
22 #include "sysemu/sysemu.h"
23 #include "block/block.h"
24 #include "qapi/qmp/qerror.h"
25 #include "qapi/util.h"
26 #include "qemu/sockets.h"
27 #include "qemu/rcu.h"
28 #include "migration/block.h"
29 #include "migration/postcopy-ram.h"
30 #include "qemu/thread.h"
31 #include "qmp-commands.h"
32 #include "trace.h"
33 #include "qapi-event.h"
34 #include "qom/cpu.h"
35 #include "exec/memory.h"
36 #include "exec/address-spaces.h"
37 #include "io/channel-buffer.h"
38 #include "io/channel-tls.h"
39 #include "migration/colo.h"
40
41 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
42
43 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
44 * data. */
45 #define BUFFER_DELAY 100
46 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
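/* Note: the limit passed to qemu_file_set_rate_limit() below is applied per
 * BUFFER_DELAY window, so each window carries at most
 * max_bandwidth / XFER_LIMIT_RATIO bytes (one tenth of the per-second
 * bandwidth with BUFFER_DELAY = 100 ms). */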
47
48 /* Time in milliseconds we are allowed to stop the source,
49 * for sending the last part */
50 #define DEFAULT_MIGRATE_SET_DOWNTIME 300
51
52 /* Maximum migrate downtime set to 2000 seconds */
53 #define MAX_MIGRATE_DOWNTIME_SECONDS 2000
54 #define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)
55
56 /* Default compression thread count */
57 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
58 /* Default decompression thread count, usually decompression is at
59 * least 4 times as fast as compression.*/
60 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
62 /* 0: no compression, 1: best speed, ..., 9: best compression ratio */
62 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
63 /* Define default autoconverge cpu throttle migration parameters */
64 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
65 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
66
67 /* Migration XBZRLE default cache size */
68 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
69
70 /* The delay time (in ms) between two COLO checkpoints
71 * Note: Please change this default value to 10000 when we support hybrid mode.
72 */
73 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200
74
75 static NotifierList migration_state_notifiers =
76 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
77
78 static bool deferred_incoming;
79
80 /*
81 * Current state of incoming postcopy; note this is not part of
82 * MigrationIncomingState since its state is used during cleanup
83 * at the end as MIS is being freed.
84 */
85 static PostcopyState incoming_postcopy_state;
86
87 /* When we add fault tolerance, we could have several
88 migrations at once. For now we don't need dynamic
89 creation of migration state. */
90
91 /* For outgoing */
92 MigrationState *migrate_get_current(void)
93 {
94 static bool once;
95 static MigrationState current_migration = {
96 .state = MIGRATION_STATUS_NONE,
97 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
98 .mbps = -1,
99 .parameters = {
100 .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
101 .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
102 .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
103 .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
104 .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
105 .max_bandwidth = MAX_THROTTLE,
106 .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
107 .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
108 },
109 };
110
111 if (!once) {
112 qemu_mutex_init(&current_migration.src_page_req_mutex);
113 once = true;
114 }
115 return &current_migration;
116 }
117
118 MigrationIncomingState *migration_incoming_get_current(void)
119 {
120 static bool once;
121 static MigrationIncomingState mis_current;
122
123 if (!once) {
124 memset(&mis_current, 0, sizeof(MigrationIncomingState));
125 mis_current.state = MIGRATION_STATUS_NONE;
126 QLIST_INIT(&mis_current.loadvm_handlers);
127 qemu_mutex_init(&mis_current.rp_mutex);
128 qemu_event_init(&mis_current.main_thread_load_event, false);
129 once = true;
130 }
131 return &mis_current;
132 }
133
134 void migration_incoming_state_destroy(void)
135 {
136 struct MigrationIncomingState *mis = migration_incoming_get_current();
137
138 qemu_event_destroy(&mis->main_thread_load_event);
139 loadvm_free_handlers(mis);
140 }
141
142
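/* The run state is kept as the RunState name (a NUL-terminated string)
 * rather than the raw enum value, so the destination can recover it with
 * qapi_enum_parse() without relying on matching enum numbering. */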
143 typedef struct {
144 bool optional;
145 uint32_t size;
146 uint8_t runstate[100];
147 RunState state;
148 bool received;
149 } GlobalState;
150
151 static GlobalState global_state;
152
153 int global_state_store(void)
154 {
155 if (!runstate_store((char *)global_state.runstate,
156 sizeof(global_state.runstate))) {
157 error_report("runstate name too big: %s", global_state.runstate);
158 trace_migrate_state_too_big();
159 return -EINVAL;
160 }
161 return 0;
162 }
163
164 void global_state_store_running(void)
165 {
166 const char *state = RunState_lookup[RUN_STATE_RUNNING];
167 strncpy((char *)global_state.runstate,
168 state, sizeof(global_state.runstate));
169 }
170
171 static bool global_state_received(void)
172 {
173 return global_state.received;
174 }
175
176 static RunState global_state_get_runstate(void)
177 {
178 return global_state.state;
179 }
180
181 void global_state_set_optional(void)
182 {
183 global_state.optional = true;
184 }
185
186 static bool global_state_needed(void *opaque)
187 {
188 GlobalState *s = opaque;
189 char *runstate = (char *)s->runstate;
190
191 /* If it is not optional, it is mandatory */
192
193 if (s->optional == false) {
194 return true;
195 }
196
197 /* If state is running or paused, it is not needed */
198
199 if (strcmp(runstate, "running") == 0 ||
200 strcmp(runstate, "paused") == 0) {
201 return false;
202 }
203
204 /* for any other state it is needed */
205 return true;
206 }
207
208 static int global_state_post_load(void *opaque, int version_id)
209 {
210 GlobalState *s = opaque;
211 Error *local_err = NULL;
212 int r;
213 char *runstate = (char *)s->runstate;
214
215 s->received = true;
216 trace_migrate_global_state_post_load(runstate);
217
218 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
219 -1, &local_err);
220
221 if (r == -1) {
222 if (local_err) {
223 error_report_err(local_err);
224 }
225 return -EINVAL;
226 }
227 s->state = r;
228
229 return 0;
230 }
231
232 static void global_state_pre_save(void *opaque)
233 {
234 GlobalState *s = opaque;
235
236 trace_migrate_global_state_pre_save((char *)s->runstate);
237 s->size = strlen((char *)s->runstate) + 1;
238 }
239
240 static const VMStateDescription vmstate_globalstate = {
241 .name = "globalstate",
242 .version_id = 1,
243 .minimum_version_id = 1,
244 .post_load = global_state_post_load,
245 .pre_save = global_state_pre_save,
246 .needed = global_state_needed,
247 .fields = (VMStateField[]) {
248 VMSTATE_UINT32(size, GlobalState),
249 VMSTATE_BUFFER(runstate, GlobalState),
250 VMSTATE_END_OF_LIST()
251 },
252 };
253
254 void register_global_state(void)
255 {
256 /* We will use it regardless of whether we receive it */
257 strcpy((char *)&global_state.runstate, "");
258 global_state.received = false;
259 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
260 }
261
262 static void migrate_generate_event(int new_state)
263 {
264 if (migrate_use_events()) {
265 qapi_event_send_migration(new_state, &error_abort);
266 }
267 }
268
269 /*
270 * Called on -incoming with a defer: uri.
271 * The migration can be started later after any parameters have been
272 * changed.
273 */
274 static void deferred_incoming_migration(Error **errp)
275 {
276 if (deferred_incoming) {
277 error_setg(errp, "Incoming migration already deferred");
278 }
279 deferred_incoming = true;
280 }
281
282 /* Request a range of pages from the source VM at the given
283 * start address.
284 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
285 * as the last request (a name must have been given previously)
286 * Start: Address offset within the RB
287 * Len: Length in bytes required - must be a multiple of pagesize
288 */
289 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
290 ram_addr_t start, size_t len)
291 {
292 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), 1 byte name length, rbname up to 255 */
293 size_t msglen = 12; /* start + len */
294
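/* Message layout: 8-byte start and 4-byte len, both big-endian; a
 * REQ_PAGES_ID message additionally carries a 1-byte RAMBlock name
 * length followed by the name itself. */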
295 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
296 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
297
298 if (rbname) {
299 int rbname_len = strlen(rbname);
300 assert(rbname_len < 256);
301
302 bufc[msglen++] = rbname_len;
303 memcpy(bufc + msglen, rbname, rbname_len);
304 msglen += rbname_len;
305 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
306 } else {
307 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
308 }
309 }
310
311 void qemu_start_incoming_migration(const char *uri, Error **errp)
312 {
313 const char *p;
314
315 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
316 if (!strcmp(uri, "defer")) {
317 deferred_incoming_migration(errp);
318 } else if (strstart(uri, "tcp:", &p)) {
319 tcp_start_incoming_migration(p, errp);
320 #ifdef CONFIG_RDMA
321 } else if (strstart(uri, "rdma:", &p)) {
322 rdma_start_incoming_migration(p, errp);
323 #endif
324 } else if (strstart(uri, "exec:", &p)) {
325 exec_start_incoming_migration(p, errp);
326 } else if (strstart(uri, "unix:", &p)) {
327 unix_start_incoming_migration(p, errp);
328 } else if (strstart(uri, "fd:", &p)) {
329 fd_start_incoming_migration(p, errp);
330 } else {
331 error_setg(errp, "unknown migration protocol: %s", uri);
332 }
333 }
334
335 static void process_incoming_migration_bh(void *opaque)
336 {
337 Error *local_err = NULL;
338 MigrationIncomingState *mis = opaque;
339
340 /* Make sure all file formats flush their mutable metadata */
341 bdrv_invalidate_cache_all(&local_err);
342 if (local_err) {
343 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
344 MIGRATION_STATUS_FAILED);
345 error_report_err(local_err);
346 migrate_decompress_threads_join();
347 exit(EXIT_FAILURE);
348 }
349
350 /*
351 * This must happen after all error conditions are dealt with and
352 * we're sure the VM is going to be running on this host.
353 */
354 qemu_announce_self();
355
356 /* If global state section was not received or we are in running
357 state, we need to obey autostart. Any other state is set with
358 runstate_set. */
359
360 if (!global_state_received() ||
361 global_state_get_runstate() == RUN_STATE_RUNNING) {
362 if (autostart) {
363 vm_start();
364 } else {
365 runstate_set(RUN_STATE_PAUSED);
366 }
367 } else {
368 runstate_set(global_state_get_runstate());
369 }
370 migrate_decompress_threads_join();
371 /*
372 * This must happen after any state changes since as soon as an external
373 * observer sees this event they might start to prod at the VM assuming
374 * it's ready to use.
375 */
376 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
377 MIGRATION_STATUS_COMPLETED);
378 qemu_bh_delete(mis->bh);
379 migration_incoming_state_destroy();
380 }
381
382 static void process_incoming_migration_co(void *opaque)
383 {
384 QEMUFile *f = opaque;
385 MigrationIncomingState *mis = migration_incoming_get_current();
386 PostcopyState ps;
387 int ret;
388
389 mis->from_src_file = f;
390 mis->largest_page_size = qemu_ram_pagesize_largest();
391 postcopy_state_set(POSTCOPY_INCOMING_NONE);
392 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
393 MIGRATION_STATUS_ACTIVE);
394 ret = qemu_loadvm_state(f);
395
396 ps = postcopy_state_get();
397 trace_process_incoming_migration_co_end(ret, ps);
398 if (ps != POSTCOPY_INCOMING_NONE) {
399 if (ps == POSTCOPY_INCOMING_ADVISE) {
400 /*
401 * Where a migration had postcopy enabled (and thus went to advise)
402 * but managed to complete within the precopy period, we can use
403 * the normal exit.
404 */
405 postcopy_ram_incoming_cleanup(mis);
406 } else if (ret >= 0) {
407 /*
408 * Postcopy was started, cleanup should happen at the end of the
409 * postcopy thread.
410 */
411 trace_process_incoming_migration_co_postcopy_end_main();
412 return;
413 }
414 /* Else if something went wrong then just fall out of the normal exit */
415 }
416
417 /* we get COLO info, and know if we are in COLO mode */
418 if (!ret && migration_incoming_enable_colo()) {
419 mis->migration_incoming_co = qemu_coroutine_self();
420 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
421 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
422 mis->have_colo_incoming_thread = true;
423 qemu_coroutine_yield();
424
425 /* Wait for the COLO incoming checkpoint thread to exit before freeing resources */
426 qemu_thread_join(&mis->colo_incoming_thread);
427 }
428
429 qemu_fclose(f);
430 free_xbzrle_decoded_buf();
431
432 if (ret < 0) {
433 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
434 MIGRATION_STATUS_FAILED);
435 error_report("load of migration failed: %s", strerror(-ret));
436 migrate_decompress_threads_join();
437 exit(EXIT_FAILURE);
438 }
439
440 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
441 qemu_bh_schedule(mis->bh);
442 }
443
444 void migration_fd_process_incoming(QEMUFile *f)
445 {
446 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
447
448 migrate_decompress_threads_create();
449 qemu_file_set_blocking(f, false);
450 qemu_coroutine_enter(co);
451 }
452
453
454 void migration_channel_process_incoming(MigrationState *s,
455 QIOChannel *ioc)
456 {
457 trace_migration_set_incoming_channel(
458 ioc, object_get_typename(OBJECT(ioc)));
459
460 if (s->parameters.tls_creds &&
461 !object_dynamic_cast(OBJECT(ioc),
462 TYPE_QIO_CHANNEL_TLS)) {
463 Error *local_err = NULL;
464 migration_tls_channel_process_incoming(s, ioc, &local_err);
465 if (local_err) {
466 error_report_err(local_err);
467 }
468 } else {
469 QEMUFile *f = qemu_fopen_channel_input(ioc);
470 migration_fd_process_incoming(f);
471 }
472 }
473
474
475 void migration_channel_connect(MigrationState *s,
476 QIOChannel *ioc,
477 const char *hostname)
478 {
479 trace_migration_set_outgoing_channel(
480 ioc, object_get_typename(OBJECT(ioc)), hostname);
481
482 if (s->parameters.tls_creds &&
483 !object_dynamic_cast(OBJECT(ioc),
484 TYPE_QIO_CHANNEL_TLS)) {
485 Error *local_err = NULL;
486 migration_tls_channel_connect(s, ioc, hostname, &local_err);
487 if (local_err) {
488 migrate_fd_error(s, local_err);
489 error_free(local_err);
490 }
491 } else {
492 QEMUFile *f = qemu_fopen_channel_output(ioc);
493
494 s->to_dst_file = f;
495
496 migrate_fd_connect(s);
497 }
498 }
499
500
501 /*
502 * Send a message on the return channel back to the source
503 * of the migration.
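 *
 * The message is framed as a big-endian 16-bit message type, a big-endian
 * 16-bit payload length and then 'len' bytes of payload.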
504 */
505 void migrate_send_rp_message(MigrationIncomingState *mis,
506 enum mig_rp_message_type message_type,
507 uint16_t len, void *data)
508 {
509 trace_migrate_send_rp_message((int)message_type, len);
510 qemu_mutex_lock(&mis->rp_mutex);
511 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
512 qemu_put_be16(mis->to_src_file, len);
513 qemu_put_buffer(mis->to_src_file, data, len);
514 qemu_fflush(mis->to_src_file);
515 qemu_mutex_unlock(&mis->rp_mutex);
516 }
517
518 /*
519 * Send a 'SHUT' message on the return channel with the given value
520 * to indicate that we've finished with the RP. Non-0 value indicates
521 * error.
522 */
523 void migrate_send_rp_shut(MigrationIncomingState *mis,
524 uint32_t value)
525 {
526 uint32_t buf;
527
528 buf = cpu_to_be32(value);
529 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
530 }
531
532 /*
533 * Send a 'PONG' message on the return channel with the given value
534 * (normally in response to a 'PING')
535 */
536 void migrate_send_rp_pong(MigrationIncomingState *mis,
537 uint32_t value)
538 {
539 uint32_t buf;
540
541 buf = cpu_to_be32(value);
542 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
543 }
544
545 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
546 {
547 MigrationCapabilityStatusList *head = NULL;
548 MigrationCapabilityStatusList *caps;
549 MigrationState *s = migrate_get_current();
550 int i;
551
552 caps = NULL; /* silence compiler warning */
553 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
554 if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
555 continue;
556 }
557 if (head == NULL) {
558 head = g_malloc0(sizeof(*caps));
559 caps = head;
560 } else {
561 caps->next = g_malloc0(sizeof(*caps));
562 caps = caps->next;
563 }
564 caps->value =
565 g_malloc(sizeof(*caps->value));
566 caps->value->capability = i;
567 caps->value->state = s->enabled_capabilities[i];
568 }
569
570 return head;
571 }
572
573 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
574 {
575 MigrationParameters *params;
576 MigrationState *s = migrate_get_current();
577
578 params = g_malloc0(sizeof(*params));
579 params->has_compress_level = true;
580 params->compress_level = s->parameters.compress_level;
581 params->has_compress_threads = true;
582 params->compress_threads = s->parameters.compress_threads;
583 params->has_decompress_threads = true;
584 params->decompress_threads = s->parameters.decompress_threads;
585 params->has_cpu_throttle_initial = true;
586 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
587 params->has_cpu_throttle_increment = true;
588 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
589 params->has_tls_creds = !!s->parameters.tls_creds;
590 params->tls_creds = g_strdup(s->parameters.tls_creds);
591 params->has_tls_hostname = !!s->parameters.tls_hostname;
592 params->tls_hostname = g_strdup(s->parameters.tls_hostname);
593 params->has_max_bandwidth = true;
594 params->max_bandwidth = s->parameters.max_bandwidth;
595 params->has_downtime_limit = true;
596 params->downtime_limit = s->parameters.downtime_limit;
597 params->has_x_checkpoint_delay = true;
598 params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
599
600 return params;
601 }
602
603 /*
604 * Return true if we're already in the middle of a migration
605 * (i.e. any of the active or setup states)
606 */
607 static bool migration_is_setup_or_active(int state)
608 {
609 switch (state) {
610 case MIGRATION_STATUS_ACTIVE:
611 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
612 case MIGRATION_STATUS_SETUP:
613 return true;
614
615 default:
616 return false;
617
618 }
619 }
620
621 static void get_xbzrle_cache_stats(MigrationInfo *info)
622 {
623 if (migrate_use_xbzrle()) {
624 info->has_xbzrle_cache = true;
625 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
626 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
627 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
628 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
629 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
630 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
631 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
632 }
633 }
634
635 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
636 {
637 info->has_ram = true;
638 info->ram = g_malloc0(sizeof(*info->ram));
639 info->ram->transferred = ram_bytes_transferred();
640 info->ram->total = ram_bytes_total();
641 info->ram->duplicate = dup_mig_pages_transferred();
642 info->ram->skipped = skipped_mig_pages_transferred();
643 info->ram->normal = norm_mig_pages_transferred();
644 info->ram->normal_bytes = norm_mig_bytes_transferred();
645 info->ram->mbps = s->mbps;
646 info->ram->dirty_sync_count = s->dirty_sync_count;
647 info->ram->postcopy_requests = s->postcopy_requests;
648
649 if (s->state != MIGRATION_STATUS_COMPLETED) {
650 info->ram->remaining = ram_bytes_remaining();
651 info->ram->dirty_pages_rate = s->dirty_pages_rate;
652 }
653 }
654
655 MigrationInfo *qmp_query_migrate(Error **errp)
656 {
657 MigrationInfo *info = g_malloc0(sizeof(*info));
658 MigrationState *s = migrate_get_current();
659
660 switch (s->state) {
661 case MIGRATION_STATUS_NONE:
662 /* no migration has happened ever */
663 break;
664 case MIGRATION_STATUS_SETUP:
665 info->has_status = true;
666 info->has_total_time = false;
667 break;
668 case MIGRATION_STATUS_ACTIVE:
669 case MIGRATION_STATUS_CANCELLING:
670 info->has_status = true;
671 info->has_total_time = true;
672 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
673 - s->total_time;
674 info->has_expected_downtime = true;
675 info->expected_downtime = s->expected_downtime;
676 info->has_setup_time = true;
677 info->setup_time = s->setup_time;
678
679 populate_ram_info(info, s);
680
681 if (blk_mig_active()) {
682 info->has_disk = true;
683 info->disk = g_malloc0(sizeof(*info->disk));
684 info->disk->transferred = blk_mig_bytes_transferred();
685 info->disk->remaining = blk_mig_bytes_remaining();
686 info->disk->total = blk_mig_bytes_total();
687 }
688
689 if (cpu_throttle_active()) {
690 info->has_cpu_throttle_percentage = true;
691 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
692 }
693
694 get_xbzrle_cache_stats(info);
695 break;
696 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
697 /* Mostly the same as active; TODO add some postcopy stats */
698 info->has_status = true;
699 info->has_total_time = true;
700 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
701 - s->total_time;
702 info->has_expected_downtime = true;
703 info->expected_downtime = s->expected_downtime;
704 info->has_setup_time = true;
705 info->setup_time = s->setup_time;
706
707 populate_ram_info(info, s);
708
709 if (blk_mig_active()) {
710 info->has_disk = true;
711 info->disk = g_malloc0(sizeof(*info->disk));
712 info->disk->transferred = blk_mig_bytes_transferred();
713 info->disk->remaining = blk_mig_bytes_remaining();
714 info->disk->total = blk_mig_bytes_total();
715 }
716
717 get_xbzrle_cache_stats(info);
718 break;
719 case MIGRATION_STATUS_COLO:
720 info->has_status = true;
721 /* TODO: display COLO specific information (checkpoint info etc.) */
722 break;
723 case MIGRATION_STATUS_COMPLETED:
724 get_xbzrle_cache_stats(info);
725
726 info->has_status = true;
727 info->has_total_time = true;
728 info->total_time = s->total_time;
729 info->has_downtime = true;
730 info->downtime = s->downtime;
731 info->has_setup_time = true;
732 info->setup_time = s->setup_time;
733
734 populate_ram_info(info, s);
735 break;
736 case MIGRATION_STATUS_FAILED:
737 info->has_status = true;
738 if (s->error) {
739 info->has_error_desc = true;
740 info->error_desc = g_strdup(error_get_pretty(s->error));
741 }
742 break;
743 case MIGRATION_STATUS_CANCELLED:
744 info->has_status = true;
745 break;
746 }
747 info->status = s->state;
748
749 return info;
750 }
751
752 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
753 Error **errp)
754 {
755 MigrationState *s = migrate_get_current();
756 MigrationCapabilityStatusList *cap;
757 bool old_postcopy_cap = migrate_postcopy_ram();
758
759 if (migration_is_setup_or_active(s->state)) {
760 error_setg(errp, QERR_MIGRATION_ACTIVE);
761 return;
762 }
763
764 for (cap = params; cap; cap = cap->next) {
765 if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
766 if (!colo_supported()) {
767 error_setg(errp, "COLO is not currently supported, please"
768 " configure with --enable-colo option in order to"
769 " support COLO feature");
770 continue;
771 }
772 }
773 s->enabled_capabilities[cap->value->capability] = cap->value->state;
774 }
775
776 if (migrate_postcopy_ram()) {
777 if (migrate_use_compression()) {
778 /* The decompression threads asynchronously write into RAM
779 * rather than use the atomic copies needed to avoid
780 * userfaulting. It should be possible to fix the decompression
781 * threads for compatibility in future.
782 */
783 error_report("Postcopy is not currently compatible with "
784 "compression");
785 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
786 false;
787 }
788 /* This check is reasonably expensive, so only when it's being
789 * set the first time, also it's only the destination that needs
790 * special support.
791 */
792 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
793 !postcopy_ram_supported_by_host()) {
794 /* postcopy_ram_supported_by_host will have emitted a more
795 * detailed message
796 */
797 error_report("Postcopy is not supported");
798 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
799 false;
800 }
801 }
802 }
803
804 void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
805 {
806 MigrationState *s = migrate_get_current();
807
808 if (params->has_compress_level &&
809 (params->compress_level < 0 || params->compress_level > 9)) {
810 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
811 "is invalid, it should be in the range of 0 to 9");
812 return;
813 }
814 if (params->has_compress_threads &&
815 (params->compress_threads < 1 || params->compress_threads > 255)) {
816 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
817 "compress_threads",
818 "is invalid, it should be in the range of 1 to 255");
819 return;
820 }
821 if (params->has_decompress_threads &&
822 (params->decompress_threads < 1 || params->decompress_threads > 255)) {
823 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
824 "decompress_threads",
825 "is invalid, it should be in the range of 1 to 255");
826 return;
827 }
828 if (params->has_cpu_throttle_initial &&
829 (params->cpu_throttle_initial < 1 ||
830 params->cpu_throttle_initial > 99)) {
831 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
832 "cpu_throttle_initial",
833 "an integer in the range of 1 to 99");
834 return;
835 }
836 if (params->has_cpu_throttle_increment &&
837 (params->cpu_throttle_increment < 1 ||
838 params->cpu_throttle_increment > 99)) {
839 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
840 "cpu_throttle_increment",
841 "an integer in the range of 1 to 99");
842 return;
843 }
844 if (params->has_max_bandwidth &&
845 (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
846 error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
847 " range of 0 to %zu bytes/second", SIZE_MAX);
848 return;
849 }
850 if (params->has_downtime_limit &&
851 (params->downtime_limit < 0 ||
852 params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
853 error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
854 "the range of 0 to %d milliseconds",
855 MAX_MIGRATE_DOWNTIME);
856 return;
857 }
858 if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
859 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
860 "x_checkpoint_delay",
861 "is invalid, it should be positive");
862 }
863
864 if (params->has_compress_level) {
865 s->parameters.compress_level = params->compress_level;
866 }
867 if (params->has_compress_threads) {
868 s->parameters.compress_threads = params->compress_threads;
869 }
870 if (params->has_decompress_threads) {
871 s->parameters.decompress_threads = params->decompress_threads;
872 }
873 if (params->has_cpu_throttle_initial) {
874 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
875 }
876 if (params->has_cpu_throttle_increment) {
877 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
878 }
879 if (params->has_tls_creds) {
880 g_free(s->parameters.tls_creds);
881 s->parameters.tls_creds = g_strdup(params->tls_creds);
882 }
883 if (params->has_tls_hostname) {
884 g_free(s->parameters.tls_hostname);
885 s->parameters.tls_hostname = g_strdup(params->tls_hostname);
886 }
887 if (params->has_max_bandwidth) {
888 s->parameters.max_bandwidth = params->max_bandwidth;
889 if (s->to_dst_file) {
890 qemu_file_set_rate_limit(s->to_dst_file,
891 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
892 }
893 }
894 if (params->has_downtime_limit) {
895 s->parameters.downtime_limit = params->downtime_limit;
896 }
897
898 if (params->has_x_checkpoint_delay) {
899 s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
900 if (migration_in_colo_state()) {
901 colo_checkpoint_notify(s);
902 }
903 }
904 }
905
906
907 void qmp_migrate_start_postcopy(Error **errp)
908 {
909 MigrationState *s = migrate_get_current();
910
911 if (!migrate_postcopy_ram()) {
912 error_setg(errp, "Enable postcopy with migrate_set_capability before"
913 " the start of migration");
914 return;
915 }
916
917 if (s->state == MIGRATION_STATUS_NONE) {
918 error_setg(errp, "Postcopy must be started after migration has been"
919 " started");
920 return;
921 }
922 /*
923 * we don't error if migration has finished since that would be racy
924 * with issuing this command.
925 */
926 atomic_set(&s->start_postcopy, true);
927 }
928
929 /* shared migration helpers */
930
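/*
 * Atomically move from 'old_state' to 'new_state'; if another thread has
 * already changed the state, the transition (and its event) is skipped.
 */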
931 void migrate_set_state(int *state, int old_state, int new_state)
932 {
933 if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
934 trace_migrate_set_state(new_state);
935 migrate_generate_event(new_state);
936 }
937 }
938
939 static void migrate_fd_cleanup(void *opaque)
940 {
941 MigrationState *s = opaque;
942
943 qemu_bh_delete(s->cleanup_bh);
944 s->cleanup_bh = NULL;
945
946 flush_page_queue(s);
947
948 if (s->to_dst_file) {
949 trace_migrate_fd_cleanup();
950 qemu_mutex_unlock_iothread();
951 if (s->migration_thread_running) {
952 qemu_thread_join(&s->thread);
953 s->migration_thread_running = false;
954 }
955 qemu_mutex_lock_iothread();
956
957 migrate_compress_threads_join();
958 qemu_fclose(s->to_dst_file);
959 s->to_dst_file = NULL;
960 }
961
962 assert((s->state != MIGRATION_STATUS_ACTIVE) &&
963 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));
964
965 if (s->state == MIGRATION_STATUS_CANCELLING) {
966 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
967 MIGRATION_STATUS_CANCELLED);
968 }
969
970 notifier_list_notify(&migration_state_notifiers, s);
971 }
972
973 void migrate_fd_error(MigrationState *s, const Error *error)
974 {
975 trace_migrate_fd_error(error_get_pretty(error));
976 assert(s->to_dst_file == NULL);
977 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
978 MIGRATION_STATUS_FAILED);
979 if (!s->error) {
980 s->error = error_copy(error);
981 }
982 notifier_list_notify(&migration_state_notifiers, s);
983 }
984
985 static void migrate_fd_cancel(MigrationState *s)
986 {
987 int old_state ;
988 QEMUFile *f = migrate_get_current()->to_dst_file;
989 trace_migrate_fd_cancel();
990
991 if (s->rp_state.from_dst_file) {
992 /* shut down the rp socket, causing the rp thread to exit */
993 qemu_file_shutdown(s->rp_state.from_dst_file);
994 }
995
996 do {
997 old_state = s->state;
998 if (!migration_is_setup_or_active(old_state)) {
999 break;
1000 }
1001 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
1002 } while (s->state != MIGRATION_STATUS_CANCELLING);
1003
1004 /*
1005 * If we're unlucky the migration code might be stuck somewhere in a
1006 * send/write while the network has failed and is waiting to timeout;
1007 * if we've got shutdown(2) available then we can force it to quit.
1008 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
1009 * called in a bh, so there is no race against this cancel.
1010 */
1011 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
1012 qemu_file_shutdown(f);
1013 }
1014 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
1015 Error *local_err = NULL;
1016
1017 bdrv_invalidate_cache_all(&local_err);
1018 if (local_err) {
1019 error_report_err(local_err);
1020 } else {
1021 s->block_inactive = false;
1022 }
1023 }
1024 }
1025
1026 void add_migration_state_change_notifier(Notifier *notify)
1027 {
1028 notifier_list_add(&migration_state_notifiers, notify);
1029 }
1030
1031 void remove_migration_state_change_notifier(Notifier *notify)
1032 {
1033 notifier_remove(notify);
1034 }
1035
1036 bool migration_in_setup(MigrationState *s)
1037 {
1038 return s->state == MIGRATION_STATUS_SETUP;
1039 }
1040
1041 bool migration_has_finished(MigrationState *s)
1042 {
1043 return s->state == MIGRATION_STATUS_COMPLETED;
1044 }
1045
1046 bool migration_has_failed(MigrationState *s)
1047 {
1048 return (s->state == MIGRATION_STATUS_CANCELLED ||
1049 s->state == MIGRATION_STATUS_FAILED);
1050 }
1051
1052 bool migration_in_postcopy(MigrationState *s)
1053 {
1054 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
1055 }
1056
1057 bool migration_in_postcopy_after_devices(MigrationState *s)
1058 {
1059 return migration_in_postcopy(s) && s->postcopy_after_devices;
1060 }
1061
1062 bool migration_is_idle(MigrationState *s)
1063 {
1064 if (!s) {
1065 s = migrate_get_current();
1066 }
1067
1068 switch (s->state) {
1069 case MIGRATION_STATUS_NONE:
1070 case MIGRATION_STATUS_CANCELLED:
1071 case MIGRATION_STATUS_COMPLETED:
1072 case MIGRATION_STATUS_FAILED:
1073 return true;
1074 case MIGRATION_STATUS_SETUP:
1075 case MIGRATION_STATUS_CANCELLING:
1076 case MIGRATION_STATUS_ACTIVE:
1077 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1078 case MIGRATION_STATUS_COLO:
1079 return false;
1080 case MIGRATION_STATUS__MAX:
1081 g_assert_not_reached();
1082 }
1083
1084 return false;
1085 }
1086
1087 MigrationState *migrate_init(const MigrationParams *params)
1088 {
1089 MigrationState *s = migrate_get_current();
1090
1091 /*
1092 * Reinitialise all migration state, except
1093 * parameters/capabilities that the user set, and
1094 * locks.
1095 */
1096 s->bytes_xfer = 0;
1097 s->xfer_limit = 0;
1098 s->cleanup_bh = 0;
1099 s->to_dst_file = NULL;
1100 s->state = MIGRATION_STATUS_NONE;
1101 s->params = *params;
1102 s->rp_state.from_dst_file = NULL;
1103 s->rp_state.error = false;
1104 s->mbps = 0.0;
1105 s->downtime = 0;
1106 s->expected_downtime = 0;
1107 s->dirty_pages_rate = 0;
1108 s->dirty_bytes_rate = 0;
1109 s->setup_time = 0;
1110 s->dirty_sync_count = 0;
1111 s->start_postcopy = false;
1112 s->postcopy_after_devices = false;
1113 s->postcopy_requests = 0;
1114 s->migration_thread_running = false;
1115 s->last_req_rb = NULL;
1116 error_free(s->error);
1117 s->error = NULL;
1118
1119 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
1120
1121 QSIMPLEQ_INIT(&s->src_page_requests);
1122
1123 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1124 return s;
1125 }
1126
1127 static GSList *migration_blockers;
1128
1129 int migrate_add_blocker(Error *reason, Error **errp)
1130 {
1131 if (only_migratable) {
1132 error_propagate(errp, error_copy(reason));
1133 error_prepend(errp, "disallowing migration blocker "
1134 "(--only_migratable) for: ");
1135 return -EACCES;
1136 }
1137
1138 if (migration_is_idle(NULL)) {
1139 migration_blockers = g_slist_prepend(migration_blockers, reason);
1140 return 0;
1141 }
1142
1143 error_propagate(errp, error_copy(reason));
1144 error_prepend(errp, "disallowing migration blocker (migration in "
1145 "progress) for: ");
1146 return -EBUSY;
1147 }
1148
1149 void migrate_del_blocker(Error *reason)
1150 {
1151 migration_blockers = g_slist_remove(migration_blockers, reason);
1152 }
1153
1154 int check_migratable(Object *obj, Error **err)
1155 {
1156 DeviceClass *dc = DEVICE_GET_CLASS(obj);
1157 if (only_migratable && dc->vmsd) {
1158 if (dc->vmsd->unmigratable) {
1159 error_setg(err, "Device %s is not migratable, but "
1160 "--only-migratable was specified",
1161 object_get_typename(obj));
1162 return -1;
1163 }
1164 }
1165
1166 return 0;
1167 }
1168
1169 void qmp_migrate_incoming(const char *uri, Error **errp)
1170 {
1171 Error *local_err = NULL;
1172 static bool once = true;
1173
1174 if (!deferred_incoming) {
1175 error_setg(errp, "For use with '-incoming defer'");
1176 return;
1177 }
1178 if (!once) {
1179 error_setg(errp, "The incoming migration has already been started");
return;
1180 }
1181
1182 qemu_start_incoming_migration(uri, &local_err);
1183
1184 if (local_err) {
1185 error_propagate(errp, local_err);
1186 return;
1187 }
1188
1189 once = false;
1190 }
1191
1192 bool migration_is_blocked(Error **errp)
1193 {
1194 if (qemu_savevm_state_blocked(errp)) {
1195 return true;
1196 }
1197
1198 if (migration_blockers) {
1199 *errp = error_copy(migration_blockers->data);
1200 return true;
1201 }
1202
1203 return false;
1204 }
1205
1206 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1207 bool has_inc, bool inc, bool has_detach, bool detach,
1208 Error **errp)
1209 {
1210 Error *local_err = NULL;
1211 MigrationState *s = migrate_get_current();
1212 MigrationParams params;
1213 const char *p;
1214
1215 params.blk = has_blk && blk;
1216 params.shared = has_inc && inc;
1217
1218 if (migration_is_setup_or_active(s->state) ||
1219 s->state == MIGRATION_STATUS_CANCELLING ||
1220 s->state == MIGRATION_STATUS_COLO) {
1221 error_setg(errp, QERR_MIGRATION_ACTIVE);
1222 return;
1223 }
1224 if (runstate_check(RUN_STATE_INMIGRATE)) {
1225 error_setg(errp, "Guest is waiting for an incoming migration");
1226 return;
1227 }
1228
1229 if (migration_is_blocked(errp)) {
1230 return;
1231 }
1232
1233 s = migrate_init(&params);
1234
1235 if (strstart(uri, "tcp:", &p)) {
1236 tcp_start_outgoing_migration(s, p, &local_err);
1237 #ifdef CONFIG_RDMA
1238 } else if (strstart(uri, "rdma:", &p)) {
1239 rdma_start_outgoing_migration(s, p, &local_err);
1240 #endif
1241 } else if (strstart(uri, "exec:", &p)) {
1242 exec_start_outgoing_migration(s, p, &local_err);
1243 } else if (strstart(uri, "unix:", &p)) {
1244 unix_start_outgoing_migration(s, p, &local_err);
1245 } else if (strstart(uri, "fd:", &p)) {
1246 fd_start_outgoing_migration(s, p, &local_err);
1247 } else {
1248 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1249 "a valid migration protocol");
1250 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1251 MIGRATION_STATUS_FAILED);
1252 return;
1253 }
1254
1255 if (local_err) {
1256 migrate_fd_error(s, local_err);
1257 error_propagate(errp, local_err);
1258 return;
1259 }
1260 }
1261
1262 void qmp_migrate_cancel(Error **errp)
1263 {
1264 migrate_fd_cancel(migrate_get_current());
1265 }
1266
1267 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1268 {
1269 MigrationState *s = migrate_get_current();
1270 int64_t new_size;
1271
1272 /* Check for truncation */
1273 if (value != (size_t)value) {
1274 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1275 "exceeding address space");
1276 return;
1277 }
1278
1279 /* Cache should not be larger than guest ram size */
1280 if (value > ram_bytes_total()) {
1281 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1282 "exceeds guest ram size ");
1283 return;
1284 }
1285
1286 new_size = xbzrle_cache_resize(value);
1287 if (new_size < 0) {
1288 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1289 "is smaller than page size");
1290 return;
1291 }
1292
1293 s->xbzrle_cache_size = new_size;
1294 }
1295
1296 int64_t qmp_query_migrate_cache_size(Error **errp)
1297 {
1298 return migrate_xbzrle_cache_size();
1299 }
1300
1301 void qmp_migrate_set_speed(int64_t value, Error **errp)
1302 {
1303 MigrationParameters p = {
1304 .has_max_bandwidth = true,
1305 .max_bandwidth = value,
1306 };
1307
1308 qmp_migrate_set_parameters(&p, errp);
1309 }
1310
1311 void qmp_migrate_set_downtime(double value, Error **errp)
1312 {
1313 if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
1314 error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
1315 "the range of 0 to %d seconds",
1316 MAX_MIGRATE_DOWNTIME_SECONDS);
1317 return;
1318 }
1319
1320 value *= 1000; /* Convert to milliseconds */
1321 value = MAX(0, MIN(INT64_MAX, value));
1322
1323 MigrationParameters p = {
1324 .has_downtime_limit = true,
1325 .downtime_limit = value,
1326 };
1327
1328 qmp_migrate_set_parameters(&p, errp);
1329 }
1330
1331 bool migrate_release_ram(void)
1332 {
1333 MigrationState *s;
1334
1335 s = migrate_get_current();
1336
1337 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
1338 }
1339
1340 bool migrate_postcopy_ram(void)
1341 {
1342 MigrationState *s;
1343
1344 s = migrate_get_current();
1345
1346 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1347 }
1348
1349 bool migrate_auto_converge(void)
1350 {
1351 MigrationState *s;
1352
1353 s = migrate_get_current();
1354
1355 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1356 }
1357
1358 bool migrate_zero_blocks(void)
1359 {
1360 MigrationState *s;
1361
1362 s = migrate_get_current();
1363
1364 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1365 }
1366
1367 bool migrate_use_compression(void)
1368 {
1369 MigrationState *s;
1370
1371 s = migrate_get_current();
1372
1373 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
1374 }
1375
1376 int migrate_compress_level(void)
1377 {
1378 MigrationState *s;
1379
1380 s = migrate_get_current();
1381
1382 return s->parameters.compress_level;
1383 }
1384
1385 int migrate_compress_threads(void)
1386 {
1387 MigrationState *s;
1388
1389 s = migrate_get_current();
1390
1391 return s->parameters.compress_threads;
1392 }
1393
1394 int migrate_decompress_threads(void)
1395 {
1396 MigrationState *s;
1397
1398 s = migrate_get_current();
1399
1400 return s->parameters.decompress_threads;
1401 }
1402
1403 bool migrate_use_events(void)
1404 {
1405 MigrationState *s;
1406
1407 s = migrate_get_current();
1408
1409 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1410 }
1411
1412 int migrate_use_xbzrle(void)
1413 {
1414 MigrationState *s;
1415
1416 s = migrate_get_current();
1417
1418 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1419 }
1420
1421 int64_t migrate_xbzrle_cache_size(void)
1422 {
1423 MigrationState *s;
1424
1425 s = migrate_get_current();
1426
1427 return s->xbzrle_cache_size;
1428 }
1429
1430 /* migration thread support */
1431 /*
1432 * Something bad happened to the RP stream, mark an error
1433 * The caller shall print or trace something to indicate why
1434 */
1435 static void mark_source_rp_bad(MigrationState *s)
1436 {
1437 s->rp_state.error = true;
1438 }
1439
1440 static struct rp_cmd_args {
1441 ssize_t len; /* -1 = variable */
1442 const char *name;
1443 } rp_cmd_args[] = {
1444 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1445 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1446 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
1447 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1448 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
1449 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1450 };
1451
1452 /*
1453 * Process a request for pages received on the return path,
1454 * We're allowed to send more than requested (e.g. to round to our page size)
1455 * and we don't need to send pages that have already been sent.
1456 */
1457 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1458 ram_addr_t start, size_t len)
1459 {
1460 long our_host_ps = getpagesize();
1461
1462 trace_migrate_handle_rp_req_pages(rbname, start, len);
1463
1464 /*
1465 * Since we currently insist on matching page sizes, just sanity check
1466 * we're being asked for whole host pages.
1467 */
1468 if (start & (our_host_ps-1) ||
1469 (len & (our_host_ps-1))) {
1470 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1471 " len: %zd", __func__, start, len);
1472 mark_source_rp_bad(ms);
1473 return;
1474 }
1475
1476 if (ram_save_queue_pages(ms, rbname, start, len)) {
1477 mark_source_rp_bad(ms);
1478 }
1479 }
1480
1481 /*
1482 * Handles messages sent on the return path towards the source VM
1483 *
1484 */
1485 static void *source_return_path_thread(void *opaque)
1486 {
1487 MigrationState *ms = opaque;
1488 QEMUFile *rp = ms->rp_state.from_dst_file;
1489 uint16_t header_len, header_type;
1490 uint8_t buf[512];
1491 uint32_t tmp32, sibling_error;
1492 ram_addr_t start = 0; /* =0 to silence warning */
1493 size_t len = 0, expected_len;
1494 int res;
1495
1496 trace_source_return_path_thread_entry();
1497 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1498 migration_is_setup_or_active(ms->state)) {
1499 trace_source_return_path_thread_loop_top();
1500 header_type = qemu_get_be16(rp);
1501 header_len = qemu_get_be16(rp);
1502
1503 if (header_type >= MIG_RP_MSG_MAX ||
1504 header_type == MIG_RP_MSG_INVALID) {
1505 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1506 header_type, header_len);
1507 mark_source_rp_bad(ms);
1508 goto out;
1509 }
1510
1511 if ((rp_cmd_args[header_type].len != -1 &&
1512 header_len != rp_cmd_args[header_type].len) ||
1513 header_len > sizeof(buf)) {
1514 error_report("RP: Received '%s' message (0x%04x) with"
1515 "incorrect length %d expecting %zu",
1516 rp_cmd_args[header_type].name, header_type, header_len,
1517 (size_t)rp_cmd_args[header_type].len);
1518 mark_source_rp_bad(ms);
1519 goto out;
1520 }
1521
1522 /* We know we've got a valid header by this point */
1523 res = qemu_get_buffer(rp, buf, header_len);
1524 if (res != header_len) {
1525 error_report("RP: Failed reading data for message 0x%04x"
1526 " read %d expected %d",
1527 header_type, res, header_len);
1528 mark_source_rp_bad(ms);
1529 goto out;
1530 }
1531
1532 /* OK, we have the message and the data */
1533 switch (header_type) {
1534 case MIG_RP_MSG_SHUT:
1535 sibling_error = ldl_be_p(buf);
1536 trace_source_return_path_thread_shut(sibling_error);
1537 if (sibling_error) {
1538 error_report("RP: Sibling indicated error %d", sibling_error);
1539 mark_source_rp_bad(ms);
1540 }
1541 /*
1542 * We'll let the main thread deal with closing the RP;
1543 * we could do a shutdown(2) on it, but we're the only user
1544 * anyway, so there's nothing gained.
1545 */
1546 goto out;
1547
1548 case MIG_RP_MSG_PONG:
1549 tmp32 = ldl_be_p(buf);
1550 trace_source_return_path_thread_pong(tmp32);
1551 break;
1552
1553 case MIG_RP_MSG_REQ_PAGES:
1554 start = ldq_be_p(buf);
1555 len = ldl_be_p(buf + 8);
1556 migrate_handle_rp_req_pages(ms, NULL, start, len);
1557 break;
1558
1559 case MIG_RP_MSG_REQ_PAGES_ID:
1560 expected_len = 12 + 1; /* header + termination */
1561
1562 if (header_len >= expected_len) {
1563 start = ldq_be_p(buf);
1564 len = ldl_be_p(buf + 8);
1565 /* Now we expect an idstr */
1566 tmp32 = buf[12]; /* Length of the following idstr */
1567 buf[13 + tmp32] = '\0';
1568 expected_len += tmp32;
1569 }
1570 if (header_len != expected_len) {
1571 error_report("RP: Req_Page_id with length %d expecting %zd",
1572 header_len, expected_len);
1573 mark_source_rp_bad(ms);
1574 goto out;
1575 }
1576 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1577 break;
1578
1579 default:
1580 break;
1581 }
1582 }
1583 if (qemu_file_get_error(rp)) {
1584 trace_source_return_path_thread_bad_end();
1585 mark_source_rp_bad(ms);
1586 }
1587
1588 trace_source_return_path_thread_end();
1589 out:
1590 ms->rp_state.from_dst_file = NULL;
1591 qemu_fclose(rp);
1592 return NULL;
1593 }
1594
1595 static int open_return_path_on_source(MigrationState *ms)
1596 {
1597
1598 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
1599 if (!ms->rp_state.from_dst_file) {
1600 return -1;
1601 }
1602
1603 trace_open_return_path_on_source();
1604 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1605 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1606
1607 trace_open_return_path_on_source_continue();
1608
1609 return 0;
1610 }
1611
1612 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1613 static int await_return_path_close_on_source(MigrationState *ms)
1614 {
1615 /*
1616 * If this is a normal exit then the destination will send a SHUT and the
1617 * rp_thread will exit, however if there's an error we need to cause
1618 * it to exit.
1619 */
1620 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
1621 /*
1622 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1623 * waiting for the destination.
1624 */
1625 qemu_file_shutdown(ms->rp_state.from_dst_file);
1626 mark_source_rp_bad(ms);
1627 }
1628 trace_await_return_path_close_on_source_joining();
1629 qemu_thread_join(&ms->rp_state.rp_thread);
1630 trace_await_return_path_close_on_source_close();
1631 return ms->rp_state.error;
1632 }
1633
1634 /*
1635 * Switch from normal iteration to postcopy
1636 * Returns non-0 on error
1637 */
1638 static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1639 {
1640 int ret;
1641 QIOChannelBuffer *bioc;
1642 QEMUFile *fb;
1643 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1644 bool restart_block = false;
1645 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
1646 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1647
1648 trace_postcopy_start();
1649 qemu_mutex_lock_iothread();
1650 trace_postcopy_start_set_run();
1651
1652 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1653 *old_vm_running = runstate_is_running();
1654 global_state_store();
1655 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1656 if (ret < 0) {
1657 goto fail;
1658 }
1659
1660 ret = bdrv_inactivate_all();
1661 if (ret < 0) {
1662 goto fail;
1663 }
1664 restart_block = true;
1665
1666 /*
1667 * Cause any non-postcopiable, but iterative devices to
1668 * send out their final data.
1669 */
1670 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
1671
1672 /*
1673 * in Finish migrate and with the io-lock held everything should
1674 * be quiet, but we've potentially still got dirty pages and we
1675 * need to tell the destination to discard any pages it's already received
1676 * that are dirty
1677 */
1678 if (ram_postcopy_send_discard_bitmap(ms)) {
1679 error_report("postcopy send discard bitmap failed");
1680 goto fail;
1681 }
1682
1683 /*
1684 * send rest of state - note things that are doing postcopy
1685 * will notice we're in POSTCOPY_ACTIVE and not actually
1686 * wrap their state up here
1687 */
1688 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
1689 /* Ping just for debugging, helps line traces up */
1690 qemu_savevm_send_ping(ms->to_dst_file, 2);
1691
1692 /*
1693 * While loading the device state we may trigger page transfer
1694 * requests and the fd must be free to process those, and thus
1695 * the destination must read the whole device state off the fd before
1696 * it starts processing it. Unfortunately the ad-hoc migration format
1697 * doesn't allow the destination to know the size to read without fully
1698 * parsing it through each devices load-state code (especially the open
1699 * coded devices that use get/put).
1700 * So we wrap the device state up in a package with a length at the start;
1701 * to do this we use a qemu_buf to hold the whole of the device state.
1702 */
1703 bioc = qio_channel_buffer_new(4096);
1704 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
1705 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
1706 object_unref(OBJECT(bioc));
1707
1708 /*
1709 * Make sure the receiver can get incoming pages before we send the rest
1710 * of the state
1711 */
1712 qemu_savevm_send_postcopy_listen(fb);
1713
1714 qemu_savevm_state_complete_precopy(fb, false);
1715 qemu_savevm_send_ping(fb, 3);
1716
1717 qemu_savevm_send_postcopy_run(fb);
1718
1719 /* <><> end of stuff going into the package */
1720
1721 /* Last point of recovery; as soon as we send the package the destination
1722 * can open devices and potentially start running.
1723 * Let's just check again that we've not got any errors.
1724 */
1725 ret = qemu_file_get_error(ms->to_dst_file);
1726 if (ret) {
1727 error_report("postcopy_start: Migration stream errored (pre package)");
1728 goto fail_closefb;
1729 }
1730
1731 restart_block = false;
1732
1733 /* Now send that blob */
1734 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
1735 goto fail_closefb;
1736 }
1737 qemu_fclose(fb);
1738
1739 /* Send a notify to give a chance for anything that needs to happen
1740 * at the transition to postcopy and after the device state; in particular
1741 * spice needs to trigger a transition now
1742 */
1743 ms->postcopy_after_devices = true;
1744 notifier_list_notify(&migration_state_notifiers, ms);
1745
1746 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1747
1748 qemu_mutex_unlock_iothread();
1749
1750 /*
1751 * Although this ping is just for debug, it could potentially be
1752 * used for getting a better measurement of downtime at the source.
1753 */
1754 qemu_savevm_send_ping(ms->to_dst_file, 4);
1755
1756 if (migrate_release_ram()) {
1757 ram_postcopy_migrated_memory_release(ms);
1758 }
1759
1760 ret = qemu_file_get_error(ms->to_dst_file);
1761 if (ret) {
1762 error_report("postcopy_start: Migration stream errored");
1763 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1764 MIGRATION_STATUS_FAILED);
1765 }
1766
1767 return ret;
1768
1769 fail_closefb:
1770 qemu_fclose(fb);
1771 fail:
1772 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1773 MIGRATION_STATUS_FAILED);
1774 if (restart_block) {
1775 /* A failure happened early enough that we know the destination hasn't
1776 * accessed block devices, so we're safe to recover.
1777 */
1778 Error *local_err = NULL;
1779
1780 bdrv_invalidate_cache_all(&local_err);
1781 if (local_err) {
1782 error_report_err(local_err);
1783 }
1784 }
1785 qemu_mutex_unlock_iothread();
1786 return -1;
1787 }
1788
1789 /**
1790 * migration_completion: Used by migration_thread when there's not much left.
1791 * The caller 'breaks' the loop when this returns.
1792 *
1793 * @s: Current migration state
1794 * @current_active_state: The migration state we expect to be in
1795 * @*old_vm_running: Pointer to old_vm_running flag
1796 * @*start_time: Pointer to time to update
1797 */
1798 static void migration_completion(MigrationState *s, int current_active_state,
1799 bool *old_vm_running,
1800 int64_t *start_time)
1801 {
1802 int ret;
1803
1804 if (s->state == MIGRATION_STATUS_ACTIVE) {
1805 qemu_mutex_lock_iothread();
1806 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1807 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1808 *old_vm_running = runstate_is_running();
1809 ret = global_state_store();
1810
1811 if (!ret) {
1812 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1813 /*
1814 * Don't mark the image with BDRV_O_INACTIVE flag if
1815 * we will go into COLO stage later.
1816 */
1817 if (ret >= 0 && !migrate_colo_enabled()) {
1818 ret = bdrv_inactivate_all();
1819 }
1820 if (ret >= 0) {
1821 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1822 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1823 s->block_inactive = true;
1824 }
1825 }
1826 qemu_mutex_unlock_iothread();
1827
1828 if (ret < 0) {
1829 goto fail;
1830 }
1831 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1832 trace_migration_completion_postcopy_end();
1833
1834 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1835 trace_migration_completion_postcopy_end_after_complete();
1836 }
1837
1838 /*
1839 * If rp was opened we must clean up the thread before
1840 * cleaning everything else up (since if there are no failures
1841 * it will wait for the destination to send its status in
1842 * a SHUT command).
1843 * Postcopy opens rp if enabled (even if it's not activated)
1844 */
1845 if (migrate_postcopy_ram()) {
1846 int rp_error;
1847 trace_migration_completion_postcopy_end_before_rp();
1848 rp_error = await_return_path_close_on_source(s);
1849 trace_migration_completion_postcopy_end_after_rp(rp_error);
1850 if (rp_error) {
1851 goto fail_invalidate;
1852 }
1853 }
1854
1855 if (qemu_file_get_error(s->to_dst_file)) {
1856 trace_migration_completion_file_err();
1857 goto fail_invalidate;
1858 }
1859
1860 if (!migrate_colo_enabled()) {
1861 migrate_set_state(&s->state, current_active_state,
1862 MIGRATION_STATUS_COMPLETED);
1863 }
1864
1865 return;
1866
1867 fail_invalidate:
1868 /* If not doing postcopy, vm_start() will be called: let's regain
1869 * control on images.
1870 */
1871 if (s->state == MIGRATION_STATUS_ACTIVE) {
1872 Error *local_err = NULL;
1873
1874 qemu_mutex_lock_iothread();
1875 bdrv_invalidate_cache_all(&local_err);
1876 if (local_err) {
1877 error_report_err(local_err);
1878 } else {
1879 s->block_inactive = false;
1880 }
1881 qemu_mutex_unlock_iothread();
1882 }
1883
1884 fail:
1885 migrate_set_state(&s->state, current_active_state,
1886 MIGRATION_STATUS_FAILED);
1887 }
1888
1889 bool migrate_colo_enabled(void)
1890 {
1891 MigrationState *s = migrate_get_current();
1892 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
1893 }
1894
1895 /*
1896 * Master migration thread on the source VM.
1897 * It drives the migration and pumps the data down the outgoing channel.
1898 */
1899 static void *migration_thread(void *opaque)
1900 {
1901 MigrationState *s = opaque;
1902 /* Used by the bandwidth calcs, updated later */
1903 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1904 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1905 int64_t initial_bytes = 0;
1906 int64_t max_size = 0;
1907 int64_t start_time = initial_time;
1908 int64_t end_time;
1909 bool old_vm_running = false;
1910 bool entered_postcopy = false;
1911 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1912 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1913 bool enable_colo = migrate_colo_enabled();
1914
1915 rcu_register_thread();
1916
1917 qemu_savevm_state_header(s->to_dst_file);
1918
1919 if (migrate_postcopy_ram()) {
1920 /* Now tell the dest that it should open its end so it can reply */
1921 qemu_savevm_send_open_return_path(s->to_dst_file);
1922
1923 /* And do a ping that will make stuff easier to debug */
1924 qemu_savevm_send_ping(s->to_dst_file, 1);
1925
1926 /*
1927 * Tell the destination that we *might* want to do postcopy later;
1928 * if the other end can't do postcopy it should fail now, nice and
1929 * early.
1930 */
1931 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1932 }
1933
1934 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1935
1936 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1937 current_active_state = MIGRATION_STATUS_ACTIVE;
1938 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1939 MIGRATION_STATUS_ACTIVE);
1940
1941 trace_migration_thread_setup_complete();
1942
1943 while (s->state == MIGRATION_STATUS_ACTIVE ||
1944 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1945 int64_t current_time;
1946 uint64_t pending_size;
1947
1948 if (!qemu_file_rate_limit(s->to_dst_file)) {
1949 uint64_t pend_post, pend_nonpost;
1950
1951 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1952 &pend_post);
1953 pending_size = pend_nonpost + pend_post;
1954 trace_migrate_pending(pending_size, max_size,
1955 pend_post, pend_nonpost);
1956 if (pending_size && pending_size >= max_size) {
1957 /* Still a significant amount to transfer */
1958
1959 if (migrate_postcopy_ram() &&
1960 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1961 pend_nonpost <= max_size &&
1962 atomic_read(&s->start_postcopy)) {
1963
1964 if (!postcopy_start(s, &old_vm_running)) {
1965 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1966 entered_postcopy = true;
1967 }
1968
1969 continue;
1970 }
1971 /* Just another iteration step */
1972 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1973 } else {
1974 trace_migration_thread_low_pending(pending_size);
1975 migration_completion(s, current_active_state,
1976 &old_vm_running, &start_time);
1977 break;
1978 }
1979 }
1980
1981 if (qemu_file_get_error(s->to_dst_file)) {
1982 migrate_set_state(&s->state, current_active_state,
1983 MIGRATION_STATUS_FAILED);
1984 trace_migration_thread_file_err();
1985 break;
1986 }
1987 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1988 if (current_time >= initial_time + BUFFER_DELAY) {
1989 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1990 initial_bytes;
1991 uint64_t time_spent = current_time - initial_time;
1992 double bandwidth = (double)transferred_bytes / time_spent;
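/* bandwidth is in bytes per millisecond and downtime_limit in
 * milliseconds, so max_size is the amount of data that could still be
 * transferred within the allowed downtime; the pending-size check at the
 * top of the loop switches to completion once the remaining state fits
 * into it. */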
1993 max_size = bandwidth * s->parameters.downtime_limit;
1994
1995 s->mbps = (((double) transferred_bytes * 8.0) /
1996 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1997
1998 trace_migrate_transferred(transferred_bytes, time_spent,
1999 bandwidth, max_size);
2000 /* if we haven't sent anything, we don't want to recalculate;
2001 10000 is a small enough number for our purposes */
2002 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
2003 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
2004 }
2005
2006 qemu_file_reset_rate_limit(s->to_dst_file);
2007 initial_time = current_time;
2008 initial_bytes = qemu_ftell(s->to_dst_file);
2009 }
2010 if (qemu_file_rate_limit(s->to_dst_file)) {
2011 /* usleep expects microseconds */
2012 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
2013 }
2014 }
2015
2016 trace_migration_thread_after_loop();
2017 /* If we enabled cpu throttling for auto-converge, turn it off. */
2018 cpu_throttle_stop();
2019 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2020
2021 qemu_mutex_lock_iothread();
2022 /*
2023 * The resources allocated by migration will be reused in the COLO
2024 * process, so don't release them.
2025 */
2026 if (!enable_colo) {
2027 qemu_savevm_state_cleanup();
2028 }
2029 if (s->state == MIGRATION_STATUS_COMPLETED) {
2030 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
2031 s->total_time = end_time - s->total_time;
2032 if (!entered_postcopy) {
2033 s->downtime = end_time - start_time;
2034 }
2035 if (s->total_time) {
2036 s->mbps = (((double) transferred_bytes * 8.0) /
2037 ((double) s->total_time)) / 1000;
2038 }
2039 runstate_set(RUN_STATE_POSTMIGRATE);
2040 } else {
2041 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
2042 migrate_start_colo_process(s);
2043 qemu_savevm_state_cleanup();
2044 /*
2045 * Fixme: we will run the VM in COLO no matter what its old running state was.
2046 * After exiting COLO, we will keep it running.
2047 */
2048 old_vm_running = true;
2049 }
2050 if (old_vm_running && !entered_postcopy) {
2051 vm_start();
2052 } else {
2053 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2054 runstate_set(RUN_STATE_POSTMIGRATE);
2055 }
2056 }
2057 }
2058 qemu_bh_schedule(s->cleanup_bh);
2059 qemu_mutex_unlock_iothread();
2060
2061 rcu_unregister_thread();
2062 return NULL;
2063 }
2064
2065 void migrate_fd_connect(MigrationState *s)
2066 {
2067 s->expected_downtime = s->parameters.downtime_limit;
2068 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
2069
2070 qemu_file_set_blocking(s->to_dst_file, true);
2071 qemu_file_set_rate_limit(s->to_dst_file,
2072 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
2073
2074 /* Notify before starting migration thread */
2075 notifier_list_notify(&migration_state_notifiers, s);
2076
2077 /*
2078 * Open the return path; currently for postcopy but other things might
2079 * also want it.
2080 */
2081 if (migrate_postcopy_ram()) {
2082 if (open_return_path_on_source(s)) {
2083 error_report("Unable to open return-path for postcopy");
2084 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2085 MIGRATION_STATUS_FAILED);
2086 migrate_fd_cleanup(s);
2087 return;
2088 }
2089 }
2090
2091 migrate_compress_threads_create();
2092 qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
2093 QEMU_THREAD_JOINABLE);
2094 s->migration_thread_running = true;
2095 }
2096
2097 PostcopyState postcopy_state_get(void)
2098 {
2099 return atomic_mb_read(&incoming_postcopy_state);
2100 }
2101
2102 /* Set the state and return the old state */
2103 PostcopyState postcopy_state_set(PostcopyState new_state)
2104 {
2105 return atomic_xchg(&incoming_postcopy_state, new_state);
2106 }
2107