2 * QEMU migration capabilities
4 * Copyright (c) 2012-2023 Red Hat Inc
7 * Orit Wasserman <owasserm@redhat.com>
8 * Juan Quintela <quintela@redhat.com>
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-migration.h"
17 #include "qapi/qmp/qerror.h"
18 #include "sysemu/runstate.h"
19 #include "migration/misc.h"
20 #include "migration.h"
24 bool migrate_auto_converge(void)
28 s
= migrate_get_current();
30 return s
->capabilities
[MIGRATION_CAPABILITY_AUTO_CONVERGE
];
33 bool migrate_background_snapshot(void)
37 s
= migrate_get_current();
39 return s
->capabilities
[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT
];
42 bool migrate_block(void)
46 s
= migrate_get_current();
48 return s
->capabilities
[MIGRATION_CAPABILITY_BLOCK
];
51 bool migrate_colo(void)
53 MigrationState
*s
= migrate_get_current();
54 return s
->capabilities
[MIGRATION_CAPABILITY_X_COLO
];
57 bool migrate_compress(void)
61 s
= migrate_get_current();
63 return s
->capabilities
[MIGRATION_CAPABILITY_COMPRESS
];
66 bool migrate_dirty_bitmaps(void)
70 s
= migrate_get_current();
72 return s
->capabilities
[MIGRATION_CAPABILITY_DIRTY_BITMAPS
];
75 bool migrate_events(void)
79 s
= migrate_get_current();
81 return s
->capabilities
[MIGRATION_CAPABILITY_EVENTS
];
84 bool migrate_ignore_shared(void)
88 s
= migrate_get_current();
90 return s
->capabilities
[MIGRATION_CAPABILITY_X_IGNORE_SHARED
];
93 bool migrate_late_block_activate(void)
97 s
= migrate_get_current();
99 return s
->capabilities
[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE
];
102 bool migrate_multifd(void)
106 s
= migrate_get_current();
108 return s
->capabilities
[MIGRATION_CAPABILITY_MULTIFD
];
111 bool migrate_pause_before_switchover(void)
115 s
= migrate_get_current();
117 return s
->capabilities
[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER
];
120 bool migrate_postcopy_blocktime(void)
124 s
= migrate_get_current();
126 return s
->capabilities
[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME
];
129 bool migrate_postcopy_preempt(void)
133 s
= migrate_get_current();
135 return s
->capabilities
[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT
];
138 bool migrate_postcopy_ram(void)
142 s
= migrate_get_current();
144 return s
->capabilities
[MIGRATION_CAPABILITY_POSTCOPY_RAM
];
147 bool migrate_rdma_pin_all(void)
149 MigrationState
*s
= migrate_get_current();
151 return s
->capabilities
[MIGRATION_CAPABILITY_RDMA_PIN_ALL
];
154 bool migrate_release_ram(void)
158 s
= migrate_get_current();
160 return s
->capabilities
[MIGRATION_CAPABILITY_RELEASE_RAM
];
163 bool migrate_return_path(void)
167 s
= migrate_get_current();
169 return s
->capabilities
[MIGRATION_CAPABILITY_RETURN_PATH
];
172 bool migrate_validate_uuid(void)
176 s
= migrate_get_current();
178 return s
->capabilities
[MIGRATION_CAPABILITY_VALIDATE_UUID
];
181 bool migrate_xbzrle(void)
185 s
= migrate_get_current();
187 return s
->capabilities
[MIGRATION_CAPABILITY_XBZRLE
];
190 bool migrate_zero_blocks(void)
194 s
= migrate_get_current();
196 return s
->capabilities
[MIGRATION_CAPABILITY_ZERO_BLOCKS
];
199 bool migrate_zero_copy_send(void)
203 s
= migrate_get_current();
205 return s
->capabilities
[MIGRATION_CAPABILITY_ZERO_COPY_SEND
];
208 /* pseudo capabilities */
/*
 * Pseudo capability: true when any postcopy-style transfer is enabled,
 * i.e. either postcopy RAM or postcopy dirty bitmaps.
 */
bool migrate_postcopy(void)
{
    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
}
/*
 * Level of UFFD write-tracking support, as probed by
 * migrate_query_write_tracking().  The ordering matters: callers compare
 * with '<' to distinguish "absent" from "available but incompatible".
 */
typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    /* kernel lacks the required UFFD features */
    WT_SUPPORT_ABSENT,
    /* kernel support present, but memory configuration incompatible */
    WT_SUPPORT_AVAILABLE,
    /* fully supported and compatible */
    WT_SUPPORT_COMPATIBLE
} WriteTrackingSupport;
223 WriteTrackingSupport
migrate_query_write_tracking(void)
225 /* Check if kernel supports required UFFD features */
226 if (!ram_write_tracking_available()) {
227 return WT_SUPPORT_ABSENT
;
230 * Check if current memory configuration is
231 * compatible with required UFFD features.
233 if (!ram_write_tracking_compatible()) {
234 return WT_SUPPORT_AVAILABLE
;
237 return WT_SUPPORT_COMPATIBLE
;
240 /* Migration capabilities set */
241 struct MigrateCapsSet
{
242 int size
; /* Capability set size */
243 MigrationCapability caps
[]; /* Variadic array of capabilities */
245 typedef struct MigrateCapsSet MigrateCapsSet
;
/*
 * Define and initialize a MigrateCapsSet named @_name from the variadic
 * capability list.  .size is derived at compile time from the number of
 * arguments.  (Closing brace of the initializer restored; it was lost.)
 */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)    \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
/* Background-snapshot compatibility check list */
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);
274 * @migration_caps_check - check capability compatibility
276 * @old_caps: old capability list
277 * @new_caps: new capability list
278 * @errp: set *errp if the check failed, with reason
280 * Returns true if check passed, otherwise false.
282 bool migrate_caps_check(bool *old_caps
, bool *new_caps
, Error
**errp
)
284 MigrationIncomingState
*mis
= migration_incoming_get_current();
286 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
287 if (new_caps
[MIGRATION_CAPABILITY_BLOCK
]) {
288 error_setg(errp
, "QEMU compiled without old-style (blk/-b, inc/-i) "
290 error_append_hint(errp
, "Use drive_mirror+NBD instead.\n");
295 #ifndef CONFIG_REPLICATION
296 if (new_caps
[MIGRATION_CAPABILITY_X_COLO
]) {
297 error_setg(errp
, "QEMU compiled without replication module"
298 " can't enable COLO");
299 error_append_hint(errp
, "Please enable replication before COLO.\n");
304 if (new_caps
[MIGRATION_CAPABILITY_POSTCOPY_RAM
]) {
305 /* This check is reasonably expensive, so only when it's being
306 * set the first time, also it's only the destination that needs
309 if (!old_caps
[MIGRATION_CAPABILITY_POSTCOPY_RAM
] &&
310 runstate_check(RUN_STATE_INMIGRATE
) &&
311 !postcopy_ram_supported_by_host(mis
)) {
312 /* postcopy_ram_supported_by_host will have emitted a more
315 error_setg(errp
, "Postcopy is not supported");
319 if (new_caps
[MIGRATION_CAPABILITY_X_IGNORE_SHARED
]) {
320 error_setg(errp
, "Postcopy is not compatible with ignore-shared");
325 if (new_caps
[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT
]) {
326 WriteTrackingSupport wt_support
;
329 * Check if 'background-snapshot' capability is supported by
330 * host kernel and compatible with guest memory configuration.
332 wt_support
= migrate_query_write_tracking();
333 if (wt_support
< WT_SUPPORT_AVAILABLE
) {
334 error_setg(errp
, "Background-snapshot is not supported by host kernel");
337 if (wt_support
< WT_SUPPORT_COMPATIBLE
) {
338 error_setg(errp
, "Background-snapshot is not compatible "
339 "with guest memory configuration");
344 * Check if there are any migration capabilities
345 * incompatible with 'background-snapshot'.
347 for (idx
= 0; idx
< check_caps_background_snapshot
.size
; idx
++) {
348 int incomp_cap
= check_caps_background_snapshot
.caps
[idx
];
349 if (new_caps
[incomp_cap
]) {
351 "Background-snapshot is not compatible with %s",
352 MigrationCapability_str(incomp_cap
));
359 if (new_caps
[MIGRATION_CAPABILITY_ZERO_COPY_SEND
] &&
360 (!new_caps
[MIGRATION_CAPABILITY_MULTIFD
] ||
361 new_caps
[MIGRATION_CAPABILITY_COMPRESS
] ||
362 new_caps
[MIGRATION_CAPABILITY_XBZRLE
] ||
363 migrate_multifd_compression() ||
364 migrate_use_tls())) {
366 "Zero copy only available for non-compressed non-TLS multifd migration");
370 if (new_caps
[MIGRATION_CAPABILITY_ZERO_COPY_SEND
]) {
372 "Zero copy currently only available on Linux");
377 if (new_caps
[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT
]) {
378 if (!new_caps
[MIGRATION_CAPABILITY_POSTCOPY_RAM
]) {
379 error_setg(errp
, "Postcopy preempt requires postcopy-ram");
384 * Preempt mode requires urgent pages to be sent in separate
385 * channel, OTOH compression logic will disorder all pages into
386 * different compression channels, which is not compatible with the
387 * preempt assumptions on channel assignments.
389 if (new_caps
[MIGRATION_CAPABILITY_COMPRESS
]) {
390 error_setg(errp
, "Postcopy preempt not compatible with compress");
395 if (new_caps
[MIGRATION_CAPABILITY_MULTIFD
]) {
396 if (new_caps
[MIGRATION_CAPABILITY_COMPRESS
]) {
397 error_setg(errp
, "Multifd is not compatible with compress");
405 bool migrate_cap_set(int cap
, bool value
, Error
**errp
)
407 MigrationState
*s
= migrate_get_current();
408 bool new_caps
[MIGRATION_CAPABILITY__MAX
];
410 if (migration_is_running(s
->state
)) {
411 error_setg(errp
, QERR_MIGRATION_ACTIVE
);
415 memcpy(new_caps
, s
->capabilities
, sizeof(new_caps
));
416 new_caps
[cap
] = value
;
418 if (!migrate_caps_check(s
->capabilities
, new_caps
, errp
)) {
421 s
->capabilities
[cap
] = value
;
425 MigrationCapabilityStatusList
*qmp_query_migrate_capabilities(Error
**errp
)
427 MigrationCapabilityStatusList
*head
= NULL
, **tail
= &head
;
428 MigrationCapabilityStatus
*caps
;
429 MigrationState
*s
= migrate_get_current();
432 for (i
= 0; i
< MIGRATION_CAPABILITY__MAX
; i
++) {
433 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
434 if (i
== MIGRATION_CAPABILITY_BLOCK
) {
438 caps
= g_malloc0(sizeof(*caps
));
439 caps
->capability
= i
;
440 caps
->state
= s
->capabilities
[i
];
441 QAPI_LIST_APPEND(tail
, caps
);
447 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList
*params
,
450 MigrationState
*s
= migrate_get_current();
451 MigrationCapabilityStatusList
*cap
;
452 bool new_caps
[MIGRATION_CAPABILITY__MAX
];
454 if (migration_is_running(s
->state
)) {
455 error_setg(errp
, QERR_MIGRATION_ACTIVE
);
459 memcpy(new_caps
, s
->capabilities
, sizeof(new_caps
));
460 for (cap
= params
; cap
; cap
= cap
->next
) {
461 new_caps
[cap
->value
->capability
] = cap
->value
->state
;
464 if (!migrate_caps_check(s
->capabilities
, new_caps
, errp
)) {
468 for (cap
= params
; cap
; cap
= cap
->next
) {
469 s
->capabilities
[cap
->value
->capability
] = cap
->value
->state
;
475 bool migrate_block_incremental(void)
479 s
= migrate_get_current();
481 return s
->parameters
.block_incremental
;
484 uint32_t migrate_checkpoint_delay(void)
488 s
= migrate_get_current();
490 return s
->parameters
.x_checkpoint_delay
;
493 int migrate_compress_level(void)
497 s
= migrate_get_current();
499 return s
->parameters
.compress_level
;
502 int migrate_compress_threads(void)
506 s
= migrate_get_current();
508 return s
->parameters
.compress_threads
;
511 int migrate_compress_wait_thread(void)
515 s
= migrate_get_current();
517 return s
->parameters
.compress_wait_thread
;
520 uint8_t migrate_cpu_throttle_increment(void)
524 s
= migrate_get_current();
526 return s
->parameters
.cpu_throttle_increment
;
529 uint8_t migrate_cpu_throttle_initial(void)
533 s
= migrate_get_current();
535 return s
->parameters
.cpu_throttle_initial
;
538 bool migrate_cpu_throttle_tailslow(void)
542 s
= migrate_get_current();
544 return s
->parameters
.cpu_throttle_tailslow
;
547 int migrate_decompress_threads(void)
551 s
= migrate_get_current();
553 return s
->parameters
.decompress_threads
;
556 uint8_t migrate_max_cpu_throttle(void)
560 s
= migrate_get_current();
562 return s
->parameters
.max_cpu_throttle
;
565 int64_t migrate_max_postcopy_bandwidth(void)
569 s
= migrate_get_current();
571 return s
->parameters
.max_postcopy_bandwidth
;
574 int migrate_multifd_channels(void)
578 s
= migrate_get_current();
580 return s
->parameters
.multifd_channels
;
583 MultiFDCompression
migrate_multifd_compression(void)
587 s
= migrate_get_current();
589 assert(s
->parameters
.multifd_compression
< MULTIFD_COMPRESSION__MAX
);
590 return s
->parameters
.multifd_compression
;
593 int migrate_multifd_zlib_level(void)
597 s
= migrate_get_current();
599 return s
->parameters
.multifd_zlib_level
;
602 int migrate_multifd_zstd_level(void)
606 s
= migrate_get_current();
608 return s
->parameters
.multifd_zstd_level
;
611 uint8_t migrate_throttle_trigger_threshold(void)
615 s
= migrate_get_current();
617 return s
->parameters
.throttle_trigger_threshold
;
620 uint64_t migrate_xbzrle_cache_size(void)
624 s
= migrate_get_current();
626 return s
->parameters
.xbzrle_cache_size
;
629 /* parameters helpers */
631 AnnounceParameters
*migrate_announce_params(void)
633 static AnnounceParameters ap
;
635 MigrationState
*s
= migrate_get_current();
637 ap
.initial
= s
->parameters
.announce_initial
;
638 ap
.max
= s
->parameters
.announce_max
;
639 ap
.rounds
= s
->parameters
.announce_rounds
;
640 ap
.step
= s
->parameters
.announce_step
;