/*
 * NOTE(review): removed gitweb scrape residue that preceded this file
 * (mirror_qemu.git, migration/options.c, commit "migration: Move
 * migrate_postcopy() to options.c") — it was not valid C.
 */
1 /*
2 * QEMU migration capabilities
3 *
4 * Copyright (c) 2012-2023 Red Hat Inc
5 *
6 * Authors:
7 * Orit Wasserman <owasserm@redhat.com>
8 * Juan Quintela <quintela@redhat.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 */
13
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/qapi-commands-migration.h"
17 #include "qapi/qmp/qerror.h"
18 #include "sysemu/runstate.h"
19 #include "migration/misc.h"
20 #include "migration.h"
21 #include "ram.h"
22 #include "options.h"
23
24 bool migrate_auto_converge(void)
25 {
26 MigrationState *s;
27
28 s = migrate_get_current();
29
30 return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
31 }
32
33 bool migrate_background_snapshot(void)
34 {
35 MigrationState *s;
36
37 s = migrate_get_current();
38
39 return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
40 }
41
42 bool migrate_block(void)
43 {
44 MigrationState *s;
45
46 s = migrate_get_current();
47
48 return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
49 }
50
51 bool migrate_colo(void)
52 {
53 MigrationState *s = migrate_get_current();
54 return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
55 }
56
57 bool migrate_compress(void)
58 {
59 MigrationState *s;
60
61 s = migrate_get_current();
62
63 return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
64 }
65
66 bool migrate_dirty_bitmaps(void)
67 {
68 MigrationState *s;
69
70 s = migrate_get_current();
71
72 return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
73 }
74
75 bool migrate_events(void)
76 {
77 MigrationState *s;
78
79 s = migrate_get_current();
80
81 return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
82 }
83
84 bool migrate_ignore_shared(void)
85 {
86 MigrationState *s;
87
88 s = migrate_get_current();
89
90 return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
91 }
92
93 bool migrate_late_block_activate(void)
94 {
95 MigrationState *s;
96
97 s = migrate_get_current();
98
99 return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
100 }
101
102 bool migrate_multifd(void)
103 {
104 MigrationState *s;
105
106 s = migrate_get_current();
107
108 return s->capabilities[MIGRATION_CAPABILITY_MULTIFD];
109 }
110
111 bool migrate_pause_before_switchover(void)
112 {
113 MigrationState *s;
114
115 s = migrate_get_current();
116
117 return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
118 }
119
120 bool migrate_postcopy_blocktime(void)
121 {
122 MigrationState *s;
123
124 s = migrate_get_current();
125
126 return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
127 }
128
129 bool migrate_postcopy_preempt(void)
130 {
131 MigrationState *s;
132
133 s = migrate_get_current();
134
135 return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
136 }
137
138 bool migrate_postcopy_ram(void)
139 {
140 MigrationState *s;
141
142 s = migrate_get_current();
143
144 return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
145 }
146
147 bool migrate_rdma_pin_all(void)
148 {
149 MigrationState *s = migrate_get_current();
150
151 return s->capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
152 }
153
154 bool migrate_release_ram(void)
155 {
156 MigrationState *s;
157
158 s = migrate_get_current();
159
160 return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
161 }
162
163 bool migrate_return_path(void)
164 {
165 MigrationState *s;
166
167 s = migrate_get_current();
168
169 return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
170 }
171
172 bool migrate_validate_uuid(void)
173 {
174 MigrationState *s;
175
176 s = migrate_get_current();
177
178 return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
179 }
180
181 bool migrate_xbzrle(void)
182 {
183 MigrationState *s;
184
185 s = migrate_get_current();
186
187 return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
188 }
189
190 bool migrate_zero_blocks(void)
191 {
192 MigrationState *s;
193
194 s = migrate_get_current();
195
196 return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
197 }
198
199 bool migrate_zero_copy_send(void)
200 {
201 MigrationState *s;
202
203 s = migrate_get_current();
204
205 return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
206 }
207
208 /* pseudo capabilities */
209
210 bool migrate_postcopy(void)
211 {
212 return migrate_postcopy_ram() || migrate_dirty_bitmaps();
213 }
214
/*
 * Result of probing the host for UFFD-based write tracking, as needed
 * by the background-snapshot capability.  Ordered so that "better"
 * support compares greater (see migrate_caps_check()).
 */
typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,      /* kernel lacks the required UFFD features */
    WT_SUPPORT_AVAILABLE,   /* kernel OK, memory config incompatible */
    WT_SUPPORT_COMPATIBLE   /* supported and compatible */
} WriteTrackingSupport;
221
222 static
223 WriteTrackingSupport migrate_query_write_tracking(void)
224 {
225 /* Check if kernel supports required UFFD features */
226 if (!ram_write_tracking_available()) {
227 return WT_SUPPORT_ABSENT;
228 }
229 /*
230 * Check if current memory configuration is
231 * compatible with required UFFD features.
232 */
233 if (!ram_write_tracking_compatible()) {
234 return WT_SUPPORT_AVAILABLE;
235 }
236
237 return WT_SUPPORT_COMPATIBLE;
238 }
239
/* Migration capabilities set: a counted, immutable list of capabilities */
struct MigrateCapsSet {
    int size;                   /* Capability set size */
    MigrationCapability caps[]; /* Variadic array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/*
 * Define and initialize a MigrateCapsSet named @_name from a list of
 * MIGRATION_CAPABILITY_* values; .size is derived from the argument
 * count via the sizeof trick on a compound int array literal.
 */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)    \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
253
/*
 * Background-snapshot compatibility check list: any of these
 * capabilities being enabled rejects background-snapshot in
 * migrate_caps_check().
 */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);
272
/**
 * migrate_caps_check - check capability compatibility
 *
 * @old_caps: old capability list
 * @new_caps: new capability list
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 */
bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    /* Old-style block migration is a compile-time option. */
    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    /* COLO depends on the replication module being compiled in. */
    if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] &&
            runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                    "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (new_caps[incomp_cap]) {
                error_setg(errp,
                        "Background-snapshot is not compatible with %s",
                        MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

#ifdef CONFIG_LINUX
    /*
     * Zero-copy send requires multifd without any transformation of
     * the payload (no compression, no XBZRLE, no TLS).
     */
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
        (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
         new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
         migrate_multifd_compression() ||
         migrate_use_tls())) {
        error_setg(errp,
                   "Zero copy only available for non-compressed non-TLS multifd migration");
        return false;
    }
#else
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
        error_setg(errp,
                   "Zero copy currently only available on Linux");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
        if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp, "Postcopy preempt requires postcopy-ram");
            return false;
        }

        /*
         * Preempt mode requires urgent pages to be sent in separate
         * channel, OTOH compression logic will disorder all pages into
         * different compression channels, which is not compatible with the
         * preempt assumptions on channel assignments.
         */
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Postcopy preempt not compatible with compress");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Multifd is not compatible with compress");
            return false;
        }
    }

    return true;
}
404
405 bool migrate_cap_set(int cap, bool value, Error **errp)
406 {
407 MigrationState *s = migrate_get_current();
408 bool new_caps[MIGRATION_CAPABILITY__MAX];
409
410 if (migration_is_running(s->state)) {
411 error_setg(errp, QERR_MIGRATION_ACTIVE);
412 return false;
413 }
414
415 memcpy(new_caps, s->capabilities, sizeof(new_caps));
416 new_caps[cap] = value;
417
418 if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
419 return false;
420 }
421 s->capabilities[cap] = value;
422 return true;
423 }
424
425 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
426 {
427 MigrationCapabilityStatusList *head = NULL, **tail = &head;
428 MigrationCapabilityStatus *caps;
429 MigrationState *s = migrate_get_current();
430 int i;
431
432 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
433 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
434 if (i == MIGRATION_CAPABILITY_BLOCK) {
435 continue;
436 }
437 #endif
438 caps = g_malloc0(sizeof(*caps));
439 caps->capability = i;
440 caps->state = s->capabilities[i];
441 QAPI_LIST_APPEND(tail, caps);
442 }
443
444 return head;
445 }
446
447 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
448 Error **errp)
449 {
450 MigrationState *s = migrate_get_current();
451 MigrationCapabilityStatusList *cap;
452 bool new_caps[MIGRATION_CAPABILITY__MAX];
453
454 if (migration_is_running(s->state)) {
455 error_setg(errp, QERR_MIGRATION_ACTIVE);
456 return;
457 }
458
459 memcpy(new_caps, s->capabilities, sizeof(new_caps));
460 for (cap = params; cap; cap = cap->next) {
461 new_caps[cap->value->capability] = cap->value->state;
462 }
463
464 if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
465 return;
466 }
467
468 for (cap = params; cap; cap = cap->next) {
469 s->capabilities[cap->value->capability] = cap->value->state;
470 }
471 }
472
473 /* parameters */
474
475 bool migrate_block_incremental(void)
476 {
477 MigrationState *s;
478
479 s = migrate_get_current();
480
481 return s->parameters.block_incremental;
482 }
483
484 uint32_t migrate_checkpoint_delay(void)
485 {
486 MigrationState *s;
487
488 s = migrate_get_current();
489
490 return s->parameters.x_checkpoint_delay;
491 }
492
493 int migrate_compress_level(void)
494 {
495 MigrationState *s;
496
497 s = migrate_get_current();
498
499 return s->parameters.compress_level;
500 }
501
502 int migrate_compress_threads(void)
503 {
504 MigrationState *s;
505
506 s = migrate_get_current();
507
508 return s->parameters.compress_threads;
509 }
510
511 int migrate_compress_wait_thread(void)
512 {
513 MigrationState *s;
514
515 s = migrate_get_current();
516
517 return s->parameters.compress_wait_thread;
518 }
519
520 uint8_t migrate_cpu_throttle_increment(void)
521 {
522 MigrationState *s;
523
524 s = migrate_get_current();
525
526 return s->parameters.cpu_throttle_increment;
527 }
528
529 uint8_t migrate_cpu_throttle_initial(void)
530 {
531 MigrationState *s;
532
533 s = migrate_get_current();
534
535 return s->parameters.cpu_throttle_initial;
536 }
537
538 bool migrate_cpu_throttle_tailslow(void)
539 {
540 MigrationState *s;
541
542 s = migrate_get_current();
543
544 return s->parameters.cpu_throttle_tailslow;
545 }
546
547 int migrate_decompress_threads(void)
548 {
549 MigrationState *s;
550
551 s = migrate_get_current();
552
553 return s->parameters.decompress_threads;
554 }
555
556 uint8_t migrate_max_cpu_throttle(void)
557 {
558 MigrationState *s;
559
560 s = migrate_get_current();
561
562 return s->parameters.max_cpu_throttle;
563 }
564
565 int64_t migrate_max_postcopy_bandwidth(void)
566 {
567 MigrationState *s;
568
569 s = migrate_get_current();
570
571 return s->parameters.max_postcopy_bandwidth;
572 }
573
574 int migrate_multifd_channels(void)
575 {
576 MigrationState *s;
577
578 s = migrate_get_current();
579
580 return s->parameters.multifd_channels;
581 }
582
583 MultiFDCompression migrate_multifd_compression(void)
584 {
585 MigrationState *s;
586
587 s = migrate_get_current();
588
589 assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
590 return s->parameters.multifd_compression;
591 }
592
593 int migrate_multifd_zlib_level(void)
594 {
595 MigrationState *s;
596
597 s = migrate_get_current();
598
599 return s->parameters.multifd_zlib_level;
600 }
601
602 int migrate_multifd_zstd_level(void)
603 {
604 MigrationState *s;
605
606 s = migrate_get_current();
607
608 return s->parameters.multifd_zstd_level;
609 }
610
611 uint8_t migrate_throttle_trigger_threshold(void)
612 {
613 MigrationState *s;
614
615 s = migrate_get_current();
616
617 return s->parameters.throttle_trigger_threshold;
618 }
619
620 uint64_t migrate_xbzrle_cache_size(void)
621 {
622 MigrationState *s;
623
624 s = migrate_get_current();
625
626 return s->parameters.xbzrle_cache_size;
627 }
628
629 /* parameters helpers */
630
631 AnnounceParameters *migrate_announce_params(void)
632 {
633 static AnnounceParameters ap;
634
635 MigrationState *s = migrate_get_current();
636
637 ap.initial = s->parameters.announce_initial;
638 ap.max = s->parameters.announce_max;
639 ap.rounds = s->parameters.announce_rounds;
640 ap.step = s->parameters.announce_step;
641
642 return &ap;
643 }