migration/block-dirty-bitmap.c
1 /*
2 * Block dirty bitmap postcopy migration
3 *
4 * Copyright IBM, Corp. 2009
5 * Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
6 *
7 * Authors:
8 * Liran Schour <lirans@il.ibm.com>
9 * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 * This file is derived from migration/block.c, so its author and IBM copyright
14 * are here, although the content is quite different.
15 *
16 * Contributions after 2012-01-13 are licensed under the terms of the
17 * GNU GPL, version 2 or (at your option) any later version.
18 *
19 * ***
20 *
21 * This file implements postcopy migration of dirty bitmaps. Only
22 * QMP-addressable (named) bitmaps are migrated.
23 *
24 * Bitmap migration implies creating a bitmap with the same name and granularity
25 * in the destination QEMU. If a bitmap with the same name (for the same node)
26 * already exists on the destination, an error is generated.
27 *
28 * Format of the migration stream:
29 *
30 * # Header (shared for different chunk types)
31 * 1, 2 or 4 bytes: flags (see qemu_{get,put}_bitmap_flags)
32 * [ 1 byte: node alias size ] \ flags & DEVICE_NAME
33 * [ n bytes: node alias ] /
34 * [ 1 byte: bitmap alias size ] \ flags & BITMAP_NAME
35 * [ n bytes: bitmap alias ] /
36 *
37 * # Start of bitmap migration (flags & START)
38 * header
39 * be32: granularity
40 * 1 byte: bitmap flags (corresponds to BdrvDirtyBitmap)
41 * bit 0 - bitmap is enabled
42 * bit 1 - bitmap is persistent
43 * bit 2 - bitmap is autoloading (no longer used; ignored on load)
44 * bits 3-7 - reserved, must be zero
45 *
46 * # Completion of bitmap migration (flags & COMPLETE)
47 * header
48 *
49 * # Data chunk of bitmap migration
50 * header
51 * be64: start sector
52 * be32: number of sectors
53 * [ be64: buffer size ] \ ! (flags & ZEROES)
54 * [ n bytes: buffer ] /
55 *
56 * The last chunk in the stream must contain flags & EOS. A chunk may omit the
57 * device and/or bitmap name, in which case they are assumed to be the same as
58 * in the previous chunk.
59 */
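/*
 * For example, one migrated bitmap typically appears on the wire as a START
 * chunk carrying the node and bitmap aliases, the granularity and the bitmap
 * flags (header flags 0x1c = START | DEVICE_NAME | BITMAP_NAME), followed by
 * any number of BITS chunks (0x40, or 0x42 when the ZEROES optimization
 * applies), and finally a COMPLETE chunk (0x20). Every batch of chunks
 * written by the source (setup, each iteration, completion) ends with a
 * chunk that has the EOS flag (0x01) set.
 */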
60
61 #include "qemu/osdep.h"
62 #include "block/block.h"
63 #include "block/block_int.h"
64 #include "block/dirty-bitmap.h"
65 #include "sysemu/block-backend.h"
66 #include "sysemu/runstate.h"
67 #include "qemu/main-loop.h"
68 #include "qemu/error-report.h"
69 #include "migration/misc.h"
70 #include "migration/migration.h"
71 #include "qemu-file.h"
72 #include "migration/vmstate.h"
73 #include "migration/register.h"
74 #include "qemu/hbitmap.h"
75 #include "qemu/cutils.h"
76 #include "qemu/id.h"
77 #include "qapi/error.h"
78 #include "qapi/qapi-commands-migration.h"
79 #include "qapi/qapi-visit-migration.h"
80 #include "qapi/clone-visitor.h"
81 #include "trace.h"
82
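/*
 * Serialized bitmap data carried by one bulk-phase BITS chunk: 1 KiB,
 * i.e. CHUNK_SIZE * 8 bits of the dirty bitmap.
 */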
83 #define CHUNK_SIZE (1 << 10)
84
85 /* Flags occupy one, two or four bytes (Big Endian). The size is determined as
86 * follows:
87 * in the first (most significant) byte the top bit (0x80) is clear --> one byte
88 * in the first byte the top bit is set --> two or four bytes, depending on
89 * the second byte:
90 * | in the second byte the top bit is clear --> two bytes
91 * | in the second byte the top bit is set --> four bytes
92 */
93 #define DIRTY_BITMAP_MIG_FLAG_EOS 0x01
94 #define DIRTY_BITMAP_MIG_FLAG_ZEROES 0x02
95 #define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME 0x04
96 #define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME 0x08
97 #define DIRTY_BITMAP_MIG_FLAG_START 0x10
98 #define DIRTY_BITMAP_MIG_FLAG_COMPLETE 0x20
99 #define DIRTY_BITMAP_MIG_FLAG_BITS 0x40
100
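/* Top bit of a flags byte: marks that more flag bytes follow (see the size rules above) */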
101 #define DIRTY_BITMAP_MIG_EXTRA_FLAGS 0x80
102
103 #define DIRTY_BITMAP_MIG_START_FLAG_ENABLED 0x01
104 #define DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT 0x02
105 /* 0x04 was the "AUTOLOAD" flag in older versions; it is now ignored */
106 #define DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK 0xf8
107
108 /* State of one bitmap during save process */
109 typedef struct SaveBitmapState {
110 /* Written during setup phase. */
111 BlockDriverState *bs;
112 char *node_alias;
113 char *bitmap_alias;
114 BdrvDirtyBitmap *bitmap;
115 uint64_t total_sectors;
116 uint64_t sectors_per_chunk;
117 QSIMPLEQ_ENTRY(SaveBitmapState) entry;
118 uint8_t flags;
119
120 /* For bulk phase. */
121 bool bulk_completed;
122 uint64_t cur_sector;
123 } SaveBitmapState;
124
125 /* State of the dirty bitmap migration (DBM) during save process */
126 typedef struct DBMSaveState {
127 QSIMPLEQ_HEAD(, SaveBitmapState) dbms_list;
128
129 bool bulk_completed;
130 bool no_bitmaps;
131
132 /* for send_bitmap_bits() */
133 BlockDriverState *prev_bs;
134 BdrvDirtyBitmap *prev_bitmap;
135 } DBMSaveState;
136
137 typedef struct LoadBitmapState {
138 BlockDriverState *bs;
139 BdrvDirtyBitmap *bitmap;
140 bool migrated;
141 bool enabled;
142 } LoadBitmapState;
143
144 /* State of the dirty bitmap migration (DBM) during load process */
145 typedef struct DBMLoadState {
146 uint32_t flags;
147 char node_alias[256];
148 char bitmap_alias[256];
149 char bitmap_name[BDRV_BITMAP_MAX_NAME_SIZE + 1];
150 BlockDriverState *bs;
151 BdrvDirtyBitmap *bitmap;
152
153 bool before_vm_start_handled; /* set in dirty_bitmap_mig_before_vm_start */
154 BitmapMigrationBitmapAlias *bmap_inner;
155
156 /*
157 * cancelled
158 * Incoming migration was cancelled for some reason. We must still read our
159 * chunks from the migration stream, so as not to disturb other migration
160 * objects (like RAM), but we just ignore them and do not touch any
161 * bitmaps or nodes.
162 */
163 bool cancelled;
164
165 GSList *bitmaps;
166 QemuMutex lock; /* protect bitmaps */
167 } DBMLoadState;
168
169 typedef struct DBMState {
170 DBMSaveState save;
171 DBMLoadState load;
172 } DBMState;
173
174 static DBMState dbm_state;
175
176 /* For hash tables that map node/bitmap names to aliases */
177 typedef struct AliasMapInnerNode {
178 char *string;
179 GHashTable *subtree;
180 } AliasMapInnerNode;
181
182 static void free_alias_map_inner_node(void *amin_ptr)
183 {
184 AliasMapInnerNode *amin = amin_ptr;
185
186 g_free(amin->string);
187 g_hash_table_unref(amin->subtree);
188 g_free(amin);
189 }
190
191 /**
192 * Construct an alias map based on the given QMP structure.
193 *
194 * (Note that we cannot store such maps in the MigrationParameters
195 * object, because that struct is defined by the QAPI schema, which
196 * makes it basically impossible to have dicts with arbitrary keys.
197 * Therefore, we instead have to construct these maps when migration
198 * starts.)
199 *
200 * @bbm is the block_bitmap_mapping from the migration parameters.
201 *
202 * If @name_to_alias is true, the returned hash table will map node
203 * and bitmap names to their respective aliases (for outgoing
204 * migration).
205 *
206 * If @name_to_alias is false, the returned hash table will map node
207 * and bitmap aliases to their respective names (for incoming
208 * migration).
209 *
210 * The hash table maps node names/aliases to AliasMapInnerNode
211 * objects, whose .string is the respective node alias/name, and whose
212 * .subtree table maps bitmap names/aliases to the respective bitmap
213 * alias/name.
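 *
 * For example, given a block-bitmap-mapping entry with node-name 'node0',
 * alias 'drive', and one bitmap entry {name: 'bitmap0', alias: 'b0'}, the
 * name-to-alias table maps 'node0' to an AliasMapInnerNode whose .string is
 * 'drive' and whose .subtree maps 'bitmap0' to the BitmapMigrationBitmapAlias
 * carrying the alias 'b0'.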
214 */
215 static GHashTable *construct_alias_map(const BitmapMigrationNodeAliasList *bbm,
216 bool name_to_alias,
217 Error **errp)
218 {
219 GHashTable *alias_map;
220 size_t max_node_name_len = sizeof_field(BlockDriverState, node_name) - 1;
221
222 alias_map = g_hash_table_new_full(g_str_hash, g_str_equal,
223 g_free, free_alias_map_inner_node);
224
225 for (; bbm; bbm = bbm->next) {
226 const BitmapMigrationNodeAlias *bmna = bbm->value;
227 const BitmapMigrationBitmapAliasList *bmbal;
228 AliasMapInnerNode *amin;
229 GHashTable *bitmaps_map;
230 const char *node_map_from, *node_map_to;
231 GDestroyNotify gdn;
232
233 if (!id_wellformed(bmna->alias)) {
234 error_setg(errp, "The node alias '%s' is not well-formed",
235 bmna->alias);
236 goto fail;
237 }
238
239 if (strlen(bmna->alias) > UINT8_MAX) {
240 error_setg(errp, "The node alias '%s' is longer than %u bytes",
241 bmna->alias, UINT8_MAX);
242 goto fail;
243 }
244
245 if (strlen(bmna->node_name) > max_node_name_len) {
246 error_setg(errp, "The node name '%s' is longer than %zu bytes",
247 bmna->node_name, max_node_name_len);
248 goto fail;
249 }
250
251 if (name_to_alias) {
252 if (g_hash_table_contains(alias_map, bmna->node_name)) {
253 error_setg(errp, "The node name '%s' is mapped twice",
254 bmna->node_name);
255 goto fail;
256 }
257
258 node_map_from = bmna->node_name;
259 node_map_to = bmna->alias;
260 } else {
261 if (g_hash_table_contains(alias_map, bmna->alias)) {
262 error_setg(errp, "The node alias '%s' is used twice",
263 bmna->alias);
264 goto fail;
265 }
266
267 node_map_from = bmna->alias;
268 node_map_to = bmna->node_name;
269 }
270
271 gdn = (GDestroyNotify) qapi_free_BitmapMigrationBitmapAlias;
272 bitmaps_map = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
273 gdn);
274
275 amin = g_new(AliasMapInnerNode, 1);
276 *amin = (AliasMapInnerNode){
277 .string = g_strdup(node_map_to),
278 .subtree = bitmaps_map,
279 };
280
281 g_hash_table_insert(alias_map, g_strdup(node_map_from), amin);
282
283 for (bmbal = bmna->bitmaps; bmbal; bmbal = bmbal->next) {
284 const BitmapMigrationBitmapAlias *bmba = bmbal->value;
285 const char *bmap_map_from;
286
287 if (strlen(bmba->alias) > UINT8_MAX) {
288 error_setg(errp,
289 "The bitmap alias '%s' is longer than %u bytes",
290 bmba->alias, UINT8_MAX);
291 goto fail;
292 }
293
294 if (strlen(bmba->name) > BDRV_BITMAP_MAX_NAME_SIZE) {
295 error_setg(errp, "The bitmap name '%s' is longer than %d bytes",
296 bmba->name, BDRV_BITMAP_MAX_NAME_SIZE);
297 goto fail;
298 }
299
300 if (name_to_alias) {
301 bmap_map_from = bmba->name;
302
303 if (g_hash_table_contains(bitmaps_map, bmba->name)) {
304 error_setg(errp, "The bitmap '%s'/'%s' is mapped twice",
305 bmna->node_name, bmba->name);
306 goto fail;
307 }
308 } else {
309 bmap_map_from = bmba->alias;
310
311 if (g_hash_table_contains(bitmaps_map, bmba->alias)) {
312 error_setg(errp, "The bitmap alias '%s'/'%s' is used twice",
313 bmna->alias, bmba->alias);
314 goto fail;
315 }
316 }
317
318 g_hash_table_insert(bitmaps_map, g_strdup(bmap_map_from),
319 QAPI_CLONE(BitmapMigrationBitmapAlias, bmba));
320 }
321 }
322
323 return alias_map;
324
325 fail:
326 g_hash_table_destroy(alias_map);
327 return NULL;
328 }
329
330 /**
331 * Run construct_alias_map() in both directions to check whether @bbm
332 * is valid.
333 * (This function is to be used by migration/migration.c to validate
334 * the user-specified block-bitmap-mapping migration parameter.)
335 *
336 * Returns true if and only if the mapping is valid.
337 */
338 bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
339 Error **errp)
340 {
341 GHashTable *alias_map;
342
343 alias_map = construct_alias_map(bbm, true, errp);
344 if (!alias_map) {
345 return false;
346 }
347 g_hash_table_destroy(alias_map);
348
349 alias_map = construct_alias_map(bbm, false, errp);
350 if (!alias_map) {
351 return false;
352 }
353 g_hash_table_destroy(alias_map);
354
355 return true;
356 }
357
358 static uint32_t qemu_get_bitmap_flags(QEMUFile *f)
359 {
360 uint32_t flags = qemu_get_byte(f);
361 if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
362 flags = flags << 8 | qemu_get_byte(f);
363 if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
364 flags = flags << 16 | qemu_get_be16(f);
365 }
366 }
367
368 return flags;
369 }
370
371 static void qemu_put_bitmap_flags(QEMUFile *f, uint32_t flags)
372 {
373 /* The code currently does not send flags as more than one byte */
374 assert(!(flags & (0xffffff00 | DIRTY_BITMAP_MIG_EXTRA_FLAGS)));
375
376 qemu_put_byte(f, flags);
377 }
378
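/*
 * Write a chunk header. The node and bitmap aliases are only emitted when
 * they differ from the previous chunk; the destination keeps using the last
 * values it has seen (see the format description at the top of this file).
 */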
379 static void send_bitmap_header(QEMUFile *f, DBMSaveState *s,
380 SaveBitmapState *dbms, uint32_t additional_flags)
381 {
382 BlockDriverState *bs = dbms->bs;
383 BdrvDirtyBitmap *bitmap = dbms->bitmap;
384 uint32_t flags = additional_flags;
385 trace_send_bitmap_header_enter();
386
387 if (bs != s->prev_bs) {
388 s->prev_bs = bs;
389 flags |= DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME;
390 }
391
392 if (bitmap != s->prev_bitmap) {
393 s->prev_bitmap = bitmap;
394 flags |= DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;
395 }
396
397 qemu_put_bitmap_flags(f, flags);
398
399 if (flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
400 qemu_put_counted_string(f, dbms->node_alias);
401 }
402
403 if (flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
404 qemu_put_counted_string(f, dbms->bitmap_alias);
405 }
406 }
407
408 static void send_bitmap_start(QEMUFile *f, DBMSaveState *s,
409 SaveBitmapState *dbms)
410 {
411 send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_START);
412 qemu_put_be32(f, bdrv_dirty_bitmap_granularity(dbms->bitmap));
413 qemu_put_byte(f, dbms->flags);
414 }
415
416 static void send_bitmap_complete(QEMUFile *f, DBMSaveState *s,
417 SaveBitmapState *dbms)
418 {
419 send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_COMPLETE);
420 }
421
422 static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
423 SaveBitmapState *dbms,
424 uint64_t start_sector, uint32_t nr_sectors)
425 {
426 /* align for buffer_is_zero() */
427 uint64_t align = 4 * sizeof(long);
428 uint64_t unaligned_size =
429 bdrv_dirty_bitmap_serialization_size(
430 dbms->bitmap, start_sector << BDRV_SECTOR_BITS,
431 (uint64_t)nr_sectors << BDRV_SECTOR_BITS);
432 uint64_t buf_size = QEMU_ALIGN_UP(unaligned_size, align);
433 uint8_t *buf = g_malloc0(buf_size);
434 uint32_t flags = DIRTY_BITMAP_MIG_FLAG_BITS;
435
436 bdrv_dirty_bitmap_serialize_part(
437 dbms->bitmap, buf, start_sector << BDRV_SECTOR_BITS,
438 (uint64_t)nr_sectors << BDRV_SECTOR_BITS);
439
440 if (buffer_is_zero(buf, buf_size)) {
441 g_free(buf);
442 buf = NULL;
443 flags |= DIRTY_BITMAP_MIG_FLAG_ZEROES;
444 }
445
446 trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);
447
448 send_bitmap_header(f, s, dbms, flags);
449
450 qemu_put_be64(f, start_sector);
451 qemu_put_be32(f, nr_sectors);
452
453 /* If the chunk is all zeroes we flush here, since the network
454 * bandwidth is now a lot higher than the storage device bandwidth.
455 * Thus, if we queue zero chunks we slow down the migration. */
456 if (flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
457 qemu_fflush(f);
458 } else {
459 qemu_put_be64(f, buf_size);
460 qemu_put_buffer(f, buf, buf_size);
461 }
462
463 g_free(buf);
464 }
465
466 /* Called with iothread lock taken. */
467 static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
468 {
469 SaveBitmapState *dbms;
470
471 while ((dbms = QSIMPLEQ_FIRST(&s->dbms_list)) != NULL) {
472 QSIMPLEQ_REMOVE_HEAD(&s->dbms_list, entry);
473 bdrv_dirty_bitmap_set_busy(dbms->bitmap, false);
474 bdrv_unref(dbms->bs);
475 g_free(dbms->node_alias);
476 g_free(dbms->bitmap_alias);
477 g_free(dbms);
478 }
479 }
480
481 /* Called with iothread lock taken. */
482 static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
483 const char *bs_name, GHashTable *alias_map)
484 {
485 BdrvDirtyBitmap *bitmap;
486 SaveBitmapState *dbms;
487 GHashTable *bitmap_aliases;
488 const char *node_alias, *bitmap_name, *bitmap_alias;
489 Error *local_err = NULL;
490
491 /* When an alias map is given, @bs_name must be @bs's node name */
492 assert(!alias_map || !strcmp(bs_name, bdrv_get_node_name(bs)));
493
494 FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
495 if (bdrv_dirty_bitmap_name(bitmap)) {
496 break;
497 }
498 }
499 if (!bitmap) {
500 return 0;
501 }
502
503 bitmap_name = bdrv_dirty_bitmap_name(bitmap);
504
505 if (!bs_name || strcmp(bs_name, "") == 0) {
506 error_report("Bitmap '%s' in unnamed node can't be migrated",
507 bitmap_name);
508 return -1;
509 }
510
511 if (alias_map) {
512 const AliasMapInnerNode *amin = g_hash_table_lookup(alias_map, bs_name);
513
514 if (!amin) {
515 /* Skip bitmaps on nodes with no alias */
516 return 0;
517 }
518
519 node_alias = amin->string;
520 bitmap_aliases = amin->subtree;
521 } else {
522 node_alias = bs_name;
523 bitmap_aliases = NULL;
524 }
525
526 if (node_alias[0] == '#') {
527 error_report("Bitmap '%s' in a node with auto-generated "
528 "name '%s' can't be migrated",
529 bitmap_name, node_alias);
530 return -1;
531 }
532
533 FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
534 BitmapMigrationBitmapAliasTransform *bitmap_transform = NULL;
535 bitmap_name = bdrv_dirty_bitmap_name(bitmap);
536 if (!bitmap_name) {
537 continue;
538 }
539
540 if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_DEFAULT, &local_err)) {
541 error_report_err(local_err);
542 return -1;
543 }
544
545 if (bitmap_aliases) {
546 BitmapMigrationBitmapAlias *bmap_inner;
547
548 bmap_inner = g_hash_table_lookup(bitmap_aliases, bitmap_name);
549 if (!bmap_inner) {
550 /* Skip bitmaps with no alias */
551 continue;
552 }
553
554 bitmap_alias = bmap_inner->alias;
555 if (bmap_inner->transform) {
556 bitmap_transform = bmap_inner->transform;
557 }
558 } else {
559 if (strlen(bitmap_name) > UINT8_MAX) {
560 error_report("Cannot migrate bitmap '%s' on node '%s': "
561 "Name is longer than %u bytes",
562 bitmap_name, bs_name, UINT8_MAX);
563 return -1;
564 }
565 bitmap_alias = bitmap_name;
566 }
567
568 bdrv_ref(bs);
569 bdrv_dirty_bitmap_set_busy(bitmap, true);
570
571 dbms = g_new0(SaveBitmapState, 1);
572 dbms->bs = bs;
573 dbms->node_alias = g_strdup(node_alias);
574 dbms->bitmap_alias = g_strdup(bitmap_alias);
575 dbms->bitmap = bitmap;
576 dbms->total_sectors = bdrv_nb_sectors(bs);
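/*
 * One BITS chunk covers CHUNK_SIZE * 8 bits of the bitmap; each bit
 * describes 'granularity' bytes of guest data, i.e.
 * granularity >> BDRV_SECTOR_BITS sectors.
 */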
577 dbms->sectors_per_chunk = CHUNK_SIZE * 8LLU *
578 (bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS);
579 assert(dbms->sectors_per_chunk != 0);
580 if (bdrv_dirty_bitmap_enabled(bitmap)) {
581 dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
582 }
583 if (bitmap_transform &&
584 bitmap_transform->has_persistent) {
585 if (bitmap_transform->persistent) {
586 dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
587 }
588 } else {
589 if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
590 dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
591 }
592 }
593
594 QSIMPLEQ_INSERT_TAIL(&s->dbms_list, dbms, entry);
595 }
596
597 return 0;
598 }
599
600 /* Called with iothread lock taken. */
601 static int init_dirty_bitmap_migration(DBMSaveState *s)
602 {
603 BlockDriverState *bs;
604 SaveBitmapState *dbms;
605 GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL);
606 BlockBackend *blk;
607 const MigrationParameters *mig_params = &migrate_get_current()->parameters;
608 GHashTable *alias_map = NULL;
609
610 if (mig_params->has_block_bitmap_mapping) {
611 alias_map = construct_alias_map(mig_params->block_bitmap_mapping, true,
612 &error_abort);
613 }
614
615 s->bulk_completed = false;
616 s->prev_bs = NULL;
617 s->prev_bitmap = NULL;
618 s->no_bitmaps = false;
619
620 if (!alias_map) {
621 /*
622 * Use the block-backend name for the direct (or filtered) children of
623 * named block backends.
624 */
625 for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
626 const char *name = blk_name(blk);
627
628 if (!name || strcmp(name, "") == 0) {
629 continue;
630 }
631
632 bs = blk_bs(blk);
633
634 /* Skip filters without bitmaps */
635 while (bs && bs->drv && bs->drv->is_filter &&
636 !bdrv_has_named_bitmaps(bs))
637 {
638 bs = bdrv_filter_bs(bs);
639 }
640
641 if (bs && bs->drv && !bs->drv->is_filter) {
642 if (add_bitmaps_to_list(s, bs, name, NULL)) {
643 goto fail;
644 }
645 g_hash_table_add(handled_by_blk, bs);
646 }
647 }
648 }
649
650 for (bs = bdrv_next_all_states(NULL); bs; bs = bdrv_next_all_states(bs)) {
651 if (g_hash_table_contains(handled_by_blk, bs)) {
652 continue;
653 }
654
655 if (add_bitmaps_to_list(s, bs, bdrv_get_node_name(bs), alias_map)) {
656 goto fail;
657 }
658 }
659
660 /* Mark the bitmaps to skip storing only now, after all failure paths, so there is nothing to roll back */
661 QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
662 bdrv_dirty_bitmap_skip_store(dbms->bitmap, true);
663 }
664
665 if (QSIMPLEQ_EMPTY(&s->dbms_list)) {
666 s->no_bitmaps = true;
667 }
668
669 g_hash_table_destroy(handled_by_blk);
670 if (alias_map) {
671 g_hash_table_destroy(alias_map);
672 }
673
674 return 0;
675
676 fail:
677 g_hash_table_destroy(handled_by_blk);
678 if (alias_map) {
679 g_hash_table_destroy(alias_map);
680 }
681 dirty_bitmap_do_save_cleanup(s);
682
683 return -1;
684 }
685
686 /* Called with no lock taken. */
687 static void bulk_phase_send_chunk(QEMUFile *f, DBMSaveState *s,
688 SaveBitmapState *dbms)
689 {
690 uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,
691 dbms->sectors_per_chunk);
692
693 send_bitmap_bits(f, s, dbms, dbms->cur_sector, nr_sectors);
694
695 dbms->cur_sector += nr_sectors;
696 if (dbms->cur_sector >= dbms->total_sectors) {
697 dbms->bulk_completed = true;
698 }
699 }
700
701 /* Called with no lock taken. */
702 static void bulk_phase(QEMUFile *f, DBMSaveState *s, bool limit)
703 {
704 SaveBitmapState *dbms;
705
706 QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
707 while (!dbms->bulk_completed) {
708 bulk_phase_send_chunk(f, s, dbms);
709 if (limit && qemu_file_rate_limit(f)) {
710 return;
711 }
712 }
713 }
714
715 s->bulk_completed = true;
716 }
717
718 /* for SaveVMHandlers */
719 static void dirty_bitmap_save_cleanup(void *opaque)
720 {
721 DBMSaveState *s = &((DBMState *)opaque)->save;
722
723 dirty_bitmap_do_save_cleanup(s);
724 }
725
726 static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
727 {
728 DBMSaveState *s = &((DBMState *)opaque)->save;
729
730 trace_dirty_bitmap_save_iterate(migration_in_postcopy());
731
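/*
 * Bitmap contents are only sent once we are in the postcopy phase; before
 * that, each call just terminates the section with an EOS marker.
 */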
732 if (migration_in_postcopy() && !s->bulk_completed) {
733 bulk_phase(f, s, true);
734 }
735
736 qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
737
738 return s->bulk_completed;
739 }
740
741 /* Called with iothread lock taken. */
742
743 static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
744 {
745 DBMSaveState *s = &((DBMState *)opaque)->save;
746 SaveBitmapState *dbms;
747 trace_dirty_bitmap_save_complete_enter();
748
749 if (!s->bulk_completed) {
750 bulk_phase(f, s, false);
751 }
752
753 QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
754 send_bitmap_complete(f, s, dbms);
755 }
756
757 qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
758
759 trace_dirty_bitmap_save_complete_finish();
760
761 dirty_bitmap_save_cleanup(opaque);
762 return 0;
763 }
764
765 static void dirty_bitmap_state_pending(void *opaque,
766 uint64_t *must_precopy,
767 uint64_t *can_postcopy)
768 {
769 DBMSaveState *s = &((DBMState *)opaque)->save;
770 SaveBitmapState *dbms;
771 uint64_t pending = 0;
772
773 qemu_mutex_lock_iothread();
774
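/*
 * Count the bitmap bits that still have to be sent for every bitmap whose
 * bulk phase has not finished yet.
 */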
775 QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
776 uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
777 uint64_t sectors = dbms->bulk_completed ? 0 :
778 dbms->total_sectors - dbms->cur_sector;
779
780 pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
781 }
782
783 qemu_mutex_unlock_iothread();
784
785 trace_dirty_bitmap_state_pending(pending);
786
787 *can_postcopy += pending;
788 }
789
790 /* First occurrence of this bitmap. It should be created if it doesn't exist */
791 static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s)
792 {
793 Error *local_err = NULL;
794 uint32_t granularity = qemu_get_be32(f);
795 uint8_t flags = qemu_get_byte(f);
796 LoadBitmapState *b;
797 bool persistent;
798
799 if (s->cancelled) {
800 return 0;
801 }
802
803 if (s->bitmap) {
804 error_report("Bitmap with the same name ('%s') already exists on "
805 "destination", bdrv_dirty_bitmap_name(s->bitmap));
806 return -EINVAL;
807 } else {
808 s->bitmap = bdrv_create_dirty_bitmap(s->bs, granularity,
809 s->bitmap_name, &local_err);
810 if (!s->bitmap) {
811 error_report_err(local_err);
812 return -EINVAL;
813 }
814 }
815
816 if (flags & DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK) {
817 error_report("Unknown flags in migrated dirty bitmap header: %x",
818 flags);
819 return -EINVAL;
820 }
821
822 if (s->bmap_inner &&
823 s->bmap_inner->transform &&
824 s->bmap_inner->transform->has_persistent) {
825 persistent = s->bmap_inner->transform->persistent;
826 } else {
827 persistent = flags & DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
828 }
829
830 if (persistent) {
831 bdrv_dirty_bitmap_set_persistence(s->bitmap, true);
832 }
833
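/*
 * Keep the bitmap effectively frozen while its chunks are still arriving:
 * a bitmap migrated as enabled gets a successor to record new writes, a
 * disabled one is simply marked busy.
 */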
834 bdrv_disable_dirty_bitmap(s->bitmap);
835 if (flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED) {
836 bdrv_dirty_bitmap_create_successor(s->bitmap, &local_err);
837 if (local_err) {
838 error_report_err(local_err);
839 return -EINVAL;
840 }
841 } else {
842 bdrv_dirty_bitmap_set_busy(s->bitmap, true);
843 }
844
845 b = g_new(LoadBitmapState, 1);
846 b->bs = s->bs;
847 b->bitmap = s->bitmap;
848 b->migrated = false;
849 b->enabled = flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
850
851 s->bitmaps = g_slist_prepend(s->bitmaps, b);
852
853 return 0;
854 }
855
856 /*
857 * before_vm_start_handle_item
858 *
859 * g_slist_foreach helper
860 *
861 * item is LoadBitmapState*
862 * opaque is DBMLoadState*
863 */
864 static void before_vm_start_handle_item(void *item, void *opaque)
865 {
866 DBMLoadState *s = opaque;
867 LoadBitmapState *b = item;
868
869 if (b->enabled) {
870 if (b->migrated) {
871 bdrv_enable_dirty_bitmap(b->bitmap);
872 } else {
873 bdrv_dirty_bitmap_enable_successor(b->bitmap);
874 }
875 }
876
877 if (b->migrated) {
878 s->bitmaps = g_slist_remove(s->bitmaps, b);
879 g_free(b);
880 }
881 }
882
883 void dirty_bitmap_mig_before_vm_start(void)
884 {
885 DBMLoadState *s = &dbm_state.load;
886 qemu_mutex_lock(&s->lock);
887
888 assert(!s->before_vm_start_handled);
889 g_slist_foreach(s->bitmaps, before_vm_start_handle_item, s);
890 s->before_vm_start_handled = true;
891
892 qemu_mutex_unlock(&s->lock);
893 }
894
895 static void cancel_incoming_locked(DBMLoadState *s)
896 {
897 GSList *item;
898
899 if (s->cancelled) {
900 return;
901 }
902
903 s->cancelled = true;
904 s->bs = NULL;
905 s->bitmap = NULL;
906
907 /* Drop all unfinished bitmaps */
908 for (item = s->bitmaps; item; item = g_slist_next(item)) {
909 LoadBitmapState *b = item->data;
910
911 /*
912 * Bitmap must be unfinished, as finished bitmaps should already be
913 * removed from the list.
914 */
915 assert(!s->before_vm_start_handled || !b->migrated);
916 if (bdrv_dirty_bitmap_has_successor(b->bitmap)) {
917 bdrv_reclaim_dirty_bitmap(b->bitmap, &error_abort);
918 } else {
919 bdrv_dirty_bitmap_set_busy(b->bitmap, false);
920 }
921 bdrv_release_dirty_bitmap(b->bitmap);
922 }
923
924 g_slist_free_full(s->bitmaps, g_free);
925 s->bitmaps = NULL;
926 }
927
928 void dirty_bitmap_mig_cancel_outgoing(void)
929 {
930 dirty_bitmap_do_save_cleanup(&dbm_state.save);
931 }
932
933 void dirty_bitmap_mig_cancel_incoming(void)
934 {
935 DBMLoadState *s = &dbm_state.load;
936
937 qemu_mutex_lock(&s->lock);
938
939 cancel_incoming_locked(s);
940
941 qemu_mutex_unlock(&s->lock);
942 }
943
944 static void dirty_bitmap_load_complete(QEMUFile *f, DBMLoadState *s)
945 {
946 GSList *item;
947 trace_dirty_bitmap_load_complete();
948
949 if (s->cancelled) {
950 return;
951 }
952
953 bdrv_dirty_bitmap_deserialize_finish(s->bitmap);
954
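/*
 * The bitmap was frozen in dirty_bitmap_load_start(). Now that all of its
 * chunks have arrived, merge back and drop the successor (if any) or just
 * clear the busy flag.
 */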
955 if (bdrv_dirty_bitmap_has_successor(s->bitmap)) {
956 bdrv_reclaim_dirty_bitmap(s->bitmap, &error_abort);
957 } else {
958 bdrv_dirty_bitmap_set_busy(s->bitmap, false);
959 }
960
961 for (item = s->bitmaps; item; item = g_slist_next(item)) {
962 LoadBitmapState *b = item->data;
963
964 if (b->bitmap == s->bitmap) {
965 b->migrated = true;
966 if (s->before_vm_start_handled) {
967 s->bitmaps = g_slist_remove(s->bitmaps, b);
968 g_free(b);
969 }
970 break;
971 }
972 }
973 }
974
975 static int dirty_bitmap_load_bits(QEMUFile *f, DBMLoadState *s)
976 {
977 uint64_t first_byte = qemu_get_be64(f) << BDRV_SECTOR_BITS;
978 uint64_t nr_bytes = (uint64_t)qemu_get_be32(f) << BDRV_SECTOR_BITS;
979 trace_dirty_bitmap_load_bits_enter(first_byte >> BDRV_SECTOR_BITS,
980 nr_bytes >> BDRV_SECTOR_BITS);
981
982 if (s->flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
983 trace_dirty_bitmap_load_bits_zeroes();
984 if (!s->cancelled) {
985 bdrv_dirty_bitmap_deserialize_zeroes(s->bitmap, first_byte,
986 nr_bytes, false);
987 }
988 } else {
989 size_t ret;
990 g_autofree uint8_t *buf = NULL;
991 uint64_t buf_size = qemu_get_be64(f);
992 uint64_t needed_size;
993
994 /*
995 * The real check of buf_size happens a bit later. We cannot do it in
996 * cancelled mode, because we do not have the bitmap to check the constraints
997 * against (so we allocate a buffer and read before the check). On the other
998 * hand, we should not blindly g_malloc() a size taken from the stream.
999 * A single chunk should not be larger than CHUNK_SIZE; allow somewhat more,
1000 * since anything bigger means the bitmap migration fails anyway and the
1001 * whole migration will most probably fail soon due to a broken
1002 * stream.
1003 */
1004 if (buf_size > 10 * CHUNK_SIZE) {
1005 error_report("Bitmap migration stream buffer allocation request "
1006 "is too large");
1007 return -EIO;
1008 }
1009
1010 buf = g_malloc(buf_size);
1011 ret = qemu_get_buffer(f, buf, buf_size);
1012 if (ret != buf_size) {
1013 error_report("Failed to read bitmap bits");
1014 return -EIO;
1015 }
1016
1017 if (s->cancelled) {
1018 return 0;
1019 }
1020
1021 needed_size = bdrv_dirty_bitmap_serialization_size(s->bitmap,
1022 first_byte,
1023 nr_bytes);
1024
1025 if (needed_size > buf_size ||
1026 buf_size > QEMU_ALIGN_UP(needed_size, 4 * sizeof(long))
1027 /* Use the same alignment as in send_bitmap_bits() */
1028 ) {
1029 error_report("Migrated bitmap granularity doesn't "
1030 "match the destination bitmap '%s' granularity",
1031 bdrv_dirty_bitmap_name(s->bitmap));
1032 cancel_incoming_locked(s);
1033 return 0;
1034 }
1035
1036 bdrv_dirty_bitmap_deserialize_part(s->bitmap, buf, first_byte, nr_bytes,
1037 false);
1038 }
1039
1040 return 0;
1041 }
1042
1043 static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s,
1044 GHashTable *alias_map)
1045 {
1046 GHashTable *bitmap_alias_map = NULL;
1047 Error *local_err = NULL;
1048 bool nothing;
1049 s->flags = qemu_get_bitmap_flags(f);
1050 trace_dirty_bitmap_load_header(s->flags);
1051
1052 nothing = s->flags == (s->flags & DIRTY_BITMAP_MIG_FLAG_EOS);
1053
1054 if (s->flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
1055 if (!qemu_get_counted_string(f, s->node_alias)) {
1056 error_report("Unable to read node alias string");
1057 return -EINVAL;
1058 }
1059
1060 if (!s->cancelled) {
1061 if (alias_map) {
1062 const AliasMapInnerNode *amin;
1063
1064 amin = g_hash_table_lookup(alias_map, s->node_alias);
1065 if (!amin) {
1066 error_setg(&local_err, "Error: Unknown node alias '%s'",
1067 s->node_alias);
1068 s->bs = NULL;
1069 } else {
1070 bitmap_alias_map = amin->subtree;
1071 s->bs = bdrv_lookup_bs(NULL, amin->string, &local_err);
1072 }
1073 } else {
1074 s->bs = bdrv_lookup_bs(s->node_alias, s->node_alias,
1075 &local_err);
1076 }
1077 if (!s->bs) {
1078 error_report_err(local_err);
1079 cancel_incoming_locked(s);
1080 }
1081 }
1082 } else if (s->bs) {
1083 if (alias_map) {
1084 const AliasMapInnerNode *amin;
1085
1086 /* Must be present in the map, or s->bs would not be set */
1087 amin = g_hash_table_lookup(alias_map, s->node_alias);
1088 assert(amin != NULL);
1089
1090 bitmap_alias_map = amin->subtree;
1091 }
1092 } else if (!nothing && !s->cancelled) {
1093 error_report("Error: block device name is not set");
1094 cancel_incoming_locked(s);
1095 }
1096
1097 assert(nothing || s->cancelled || !!alias_map == !!bitmap_alias_map);
1098
1099 if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
1100 const char *bitmap_name;
1101
1102 if (!qemu_get_counted_string(f, s->bitmap_alias)) {
1103 error_report("Unable to read bitmap alias string");
1104 return -EINVAL;
1105 }
1106
1107 bitmap_name = s->bitmap_alias;
1108 if (!s->cancelled && bitmap_alias_map) {
1109 BitmapMigrationBitmapAlias *bmap_inner;
1110
1111 bmap_inner = g_hash_table_lookup(bitmap_alias_map, s->bitmap_alias);
1112 if (!bmap_inner) {
1113 error_report("Error: Unknown bitmap alias '%s' on node "
1114 "'%s' (alias '%s')", s->bitmap_alias,
1115 s->bs->node_name, s->node_alias);
1116 cancel_incoming_locked(s);
1117 } else {
1118 bitmap_name = bmap_inner->name;
1119 }
1120
1121 s->bmap_inner = bmap_inner;
1122 }
1123
1124 if (!s->cancelled) {
1125 g_strlcpy(s->bitmap_name, bitmap_name, sizeof(s->bitmap_name));
1126 s->bitmap = bdrv_find_dirty_bitmap(s->bs, s->bitmap_name);
1127
1128 /*
1129 * The bitmap may be NULL here; that is not an error if this is the
1130 * first occurrence of the bitmap (a START chunk)
1131 */
1132 if (!s->bitmap && !(s->flags & DIRTY_BITMAP_MIG_FLAG_START)) {
1133 error_report("Error: unknown dirty bitmap "
1134 "'%s' for block device '%s'",
1135 s->bitmap_name, s->bs->node_name);
1136 cancel_incoming_locked(s);
1137 }
1138 }
1139 } else if (!s->bitmap && !nothing && !s->cancelled) {
1140 error_report("Error: block device name is not set");
1141 cancel_incoming_locked(s);
1142 }
1143
1144 return 0;
1145 }
1146
1147 /*
1148 * dirty_bitmap_load
1149 *
1150 * Load a sequence of dirty bitmap chunks. Return an error only on fatal I/O
1151 * stream violations. On other errors just cancel the incoming bitmap migration
1152 * and return 0.
1153 *
1154 * Note that when incoming bitmap migration is cancelled, we still must read all
1155 * our chunks (and just ignore them), so as not to affect other migration objects.
1156 */
1157 static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
1158 {
1159 GHashTable *alias_map = NULL;
1160 const MigrationParameters *mig_params = &migrate_get_current()->parameters;
1161 DBMLoadState *s = &((DBMState *)opaque)->load;
1162 int ret = 0;
1163
1164 trace_dirty_bitmap_load_enter();
1165
1166 if (version_id != 1) {
1167 QEMU_LOCK_GUARD(&s->lock);
1168 cancel_incoming_locked(s);
1169 return -EINVAL;
1170 }
1171
1172 if (mig_params->has_block_bitmap_mapping) {
1173 alias_map = construct_alias_map(mig_params->block_bitmap_mapping,
1174 false, &error_abort);
1175 }
1176
1177 do {
1178 QEMU_LOCK_GUARD(&s->lock);
1179
1180 ret = dirty_bitmap_load_header(f, s, alias_map);
1181 if (ret < 0) {
1182 cancel_incoming_locked(s);
1183 goto fail;
1184 }
1185
1186 if (s->flags & DIRTY_BITMAP_MIG_FLAG_START) {
1187 ret = dirty_bitmap_load_start(f, s);
1188 } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_COMPLETE) {
1189 dirty_bitmap_load_complete(f, s);
1190 } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITS) {
1191 ret = dirty_bitmap_load_bits(f, s);
1192 }
1193
1194 if (!ret) {
1195 ret = qemu_file_get_error(f);
1196 }
1197
1198 if (ret) {
1199 cancel_incoming_locked(s);
1200 goto fail;
1201 }
1202 } while (!(s->flags & DIRTY_BITMAP_MIG_FLAG_EOS));
1203
1204 trace_dirty_bitmap_load_success();
1205 ret = 0;
1206 fail:
1207 if (alias_map) {
1208 g_hash_table_destroy(alias_map);
1209 }
1210 return ret;
1211 }
1212
1213 static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
1214 {
1215 DBMSaveState *s = &((DBMState *)opaque)->save;
1216 SaveBitmapState *dbms = NULL;
1217
1218 qemu_mutex_lock_iothread();
1219 if (init_dirty_bitmap_migration(s) < 0) {
1220 qemu_mutex_unlock_iothread();
1221 return -1;
1222 }
1223
1224 QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
1225 send_bitmap_start(f, s, dbms);
1226 }
1227 qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);
1228 qemu_mutex_unlock_iothread();
1229 return 0;
1230 }
1231
1232 static bool dirty_bitmap_is_active(void *opaque)
1233 {
1234 DBMSaveState *s = &((DBMState *)opaque)->save;
1235
1236 return migrate_dirty_bitmaps() && !s->no_bitmaps;
1237 }
1238
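/*
 * Bitmap data is only worth sending once the source VM is stopped (at
 * completion or in postcopy); while the guest is still running, skip this
 * section during iteration.
 */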
1239 static bool dirty_bitmap_is_active_iterate(void *opaque)
1240 {
1241 return dirty_bitmap_is_active(opaque) && !runstate_is_running();
1242 }
1243
1244 static bool dirty_bitmap_has_postcopy(void *opaque)
1245 {
1246 return true;
1247 }
1248
1249 static SaveVMHandlers savevm_dirty_bitmap_handlers = {
1250 .save_setup = dirty_bitmap_save_setup,
1251 .save_live_complete_postcopy = dirty_bitmap_save_complete,
1252 .save_live_complete_precopy = dirty_bitmap_save_complete,
1253 .has_postcopy = dirty_bitmap_has_postcopy,
1254 .state_pending_exact = dirty_bitmap_state_pending,
1255 .state_pending_estimate = dirty_bitmap_state_pending,
1256 .save_live_iterate = dirty_bitmap_save_iterate,
1257 .is_active_iterate = dirty_bitmap_is_active_iterate,
1258 .load_state = dirty_bitmap_load,
1259 .save_cleanup = dirty_bitmap_save_cleanup,
1260 .is_active = dirty_bitmap_is_active,
1261 };
1262
1263 void dirty_bitmap_mig_init(void)
1264 {
1265 QSIMPLEQ_INIT(&dbm_state.save.dbms_list);
1266 qemu_mutex_init(&dbm_state.load.lock);
1267
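/*
 * Register a live migration section named "dirty-bitmap", instance 0,
 * stream version 1 (the only version accepted by dirty_bitmap_load()).
 */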
1268 register_savevm_live("dirty-bitmap", 0, 1,
1269 &savevm_dirty_bitmap_handlers,
1270 &dbm_state);
1271 }