/*
 * Block dirty bitmap postcopy migration
 *
 * Copyright IBM, Corp. 2009
 * Copyright (c) 2016-2017 Virtuozzo International GmbH. All rights reserved.
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * This file is derived from migration/block.c, so its author and IBM copyright
 * are here, although the content is quite different.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 *                                ***
 *
 * Here postcopy migration of dirty bitmaps is realized. Only QMP-addressable
 * bitmaps are migrated.
 *
 * Bitmap migration implies creating a bitmap with the same name and
 * granularity in the destination QEMU. If a bitmap with the same name already
 * exists on the destination (for the same node), an error is generated.
 *
 * Format of migration:
 *
 * # Header (shared for different chunk types)
 * 1, 2 or 4 bytes: flags (see qemu_{get,put}_bitmap_flags)
 * [ 1 byte: node alias size   ] \  flags & DEVICE_NAME
 * [ n bytes: node alias       ] /
 * [ 1 byte: bitmap alias size ] \  flags & BITMAP_NAME
 * [ n bytes: bitmap alias     ] /
 *
 * # Start of bitmap migration (flags & START)
 * header
 * be32: granularity
 * 1 byte: bitmap flags (corresponds to BdrvDirtyBitmap)
 *   bit 0    -  bitmap is enabled
 *   bit 1    -  bitmap is persistent
 *   bit 2    -  bitmap is autoloading
 *   bits 3-7 -  reserved, must be zero
 *
 * # Completion of bitmap migration (flags & COMPLETE)
 * header
 *
 * # Data chunk of bitmap migration
 * header
 * be64: start sector
 * be32: number of sectors
 * [ be64: buffer size ] \ ! (flags & ZEROES)
 * [ n bytes: buffer   ] /
 *
 * The last chunk in the stream should contain flags & EOS. The chunk may skip
 * the device and/or bitmap names, assuming them to be the same as in the
 * previous chunk.
 */
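
/*
 * For illustration (the alias names and values below are hypothetical, not
 * mandated by the format): a START chunk announcing bitmap alias "bitmap0"
 * on node alias "drive0", granularity 64 KiB, enabled and persistent, would
 * be encoded as
 *
 *   0x1c                  flags = START | DEVICE_NAME | BITMAP_NAME
 *   0x06 "drive0"         node alias size + node alias
 *   0x07 "bitmap0"        bitmap alias size + bitmap alias
 *   0x00 0x01 0x00 0x00   be32 granularity (65536)
 *   0x03                  bitmap flags = ENABLED | PERSISTENT
 *
 * which is exactly what send_bitmap_start() below produces.
 */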

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
#include "migration/migration.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "migration/register.h"
#include "qemu/hbitmap.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "trace.h"

#define CHUNK_SIZE     (1 << 10)

/* Flags occupy one, two or four bytes (Big Endian). The size is determined as
 * follows:
 * if the most significant bit of the first byte is clear --> one byte
 * if it is set --> two or four bytes, depending on the second byte:
 *    | most significant bit of the second byte clear --> two bytes
 *    | most significant bit of the second byte set   --> four bytes
 */
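
/*
 * For illustration (these byte sequences are examples, not taken from a real
 * stream): the single byte 0x01 decodes to EOS, while a first byte with the
 * 0x80 bit set, e.g. 0x81 followed by 0x23, signals a two-byte encoding that
 * decodes to 0x8123; see qemu_get_bitmap_flags() below. In practice only
 * one-byte flags are ever sent, as qemu_put_bitmap_flags() asserts.
 */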
#define DIRTY_BITMAP_MIG_FLAG_EOS           0x01
#define DIRTY_BITMAP_MIG_FLAG_ZEROES        0x02
#define DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME   0x04
#define DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME   0x08
#define DIRTY_BITMAP_MIG_FLAG_START         0x10
#define DIRTY_BITMAP_MIG_FLAG_COMPLETE      0x20
#define DIRTY_BITMAP_MIG_FLAG_BITS          0x40

#define DIRTY_BITMAP_MIG_EXTRA_FLAGS        0x80

#define DIRTY_BITMAP_MIG_START_FLAG_ENABLED          0x01
#define DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT       0x02
/* 0x04 was the "AUTOLOAD" flag on older versions; it is now ignored */
#define DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK    0xf8

/* State of one bitmap during save process */
typedef struct SaveBitmapState {
    /* Written during setup phase. */
    BlockDriverState *bs;
    char *node_alias;
    char *bitmap_alias;
    BdrvDirtyBitmap *bitmap;
    uint64_t total_sectors;
    uint64_t sectors_per_chunk;
    QSIMPLEQ_ENTRY(SaveBitmapState) entry;
    uint8_t flags;

    /* For bulk phase. */
    bool bulk_completed;
    uint64_t cur_sector;
} SaveBitmapState;

/* State of the dirty bitmap migration (DBM) during save process */
typedef struct DBMSaveState {
    QSIMPLEQ_HEAD(, SaveBitmapState) dbms_list;

    bool bulk_completed;
    bool no_bitmaps;

    /* for send_bitmap_bits() */
    BlockDriverState *prev_bs;
    BdrvDirtyBitmap *prev_bitmap;
} DBMSaveState;

typedef struct LoadBitmapState {
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;
    bool migrated;
    bool enabled;
} LoadBitmapState;

/* State of the dirty bitmap migration (DBM) during load process */
typedef struct DBMLoadState {
    uint32_t flags;
    char node_alias[256];
    char bitmap_alias[256];
    char bitmap_name[BDRV_BITMAP_MAX_NAME_SIZE + 1];
    BlockDriverState *bs;
    BdrvDirtyBitmap *bitmap;

    bool before_vm_start_handled; /* set in dirty_bitmap_mig_before_vm_start */

    /*
     * cancelled
     * Incoming migration is cancelled for some reason. That means that we
     * still have to read our chunks from the migration stream, so as not to
     * affect other migration objects (like RAM), but we just ignore them and
     * do not touch any bitmaps or nodes.
     */
    bool cancelled;

    GSList *bitmaps;
    QemuMutex lock; /* protect bitmaps */
} DBMLoadState;

typedef struct DBMState {
    DBMSaveState save;
    DBMLoadState load;
} DBMState;

static DBMState dbm_state;

/* For hash tables that map node/bitmap names to aliases */
typedef struct AliasMapInnerNode {
    char *string;
    GHashTable *subtree;
} AliasMapInnerNode;

static void free_alias_map_inner_node(void *amin_ptr)
{
    AliasMapInnerNode *amin = amin_ptr;

    g_free(amin->string);
    g_hash_table_unref(amin->subtree);
    g_free(amin);
}

/**
 * Construct an alias map based on the given QMP structure.
 *
 * (Note that we cannot store such maps in the MigrationParameters
 * object, because that struct is defined by the QAPI schema, which
 * makes it basically impossible to have dicts with arbitrary keys.
 * Therefore, we instead have to construct these maps when migration
 * starts.)
 *
 * @bbm is the block_bitmap_mapping from the migration parameters.
 *
 * If @name_to_alias is true, the returned hash table will map node
 * and bitmap names to their respective aliases (for outgoing
 * migration).
 *
 * If @name_to_alias is false, the returned hash table will map node
 * and bitmap aliases to their respective names (for incoming
 * migration).
 *
 * The hash table maps node names/aliases to AliasMapInnerNode
 * objects, whose .string is the respective node alias/name, and whose
 * .subtree table maps bitmap names/aliases to the respective bitmap
 * alias/name.
 */
static GHashTable *construct_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                       bool name_to_alias,
                                       Error **errp)
{
    GHashTable *alias_map;
    size_t max_node_name_len = sizeof_field(BlockDriverState, node_name) - 1;

    alias_map = g_hash_table_new_full(g_str_hash, g_str_equal,
                                      g_free, free_alias_map_inner_node);

    for (; bbm; bbm = bbm->next) {
        const BitmapMigrationNodeAlias *bmna = bbm->value;
        const BitmapMigrationBitmapAliasList *bmbal;
        AliasMapInnerNode *amin;
        GHashTable *bitmaps_map;
        const char *node_map_from, *node_map_to;

        if (!id_wellformed(bmna->alias)) {
            error_setg(errp, "The node alias '%s' is not well-formed",
                       bmna->alias);
            goto fail;
        }

        if (strlen(bmna->alias) > UINT8_MAX) {
            error_setg(errp, "The node alias '%s' is longer than %u bytes",
                       bmna->alias, UINT8_MAX);
            goto fail;
        }

        if (strlen(bmna->node_name) > max_node_name_len) {
            error_setg(errp, "The node name '%s' is longer than %zu bytes",
                       bmna->node_name, max_node_name_len);
            goto fail;
        }

        if (name_to_alias) {
            if (g_hash_table_contains(alias_map, bmna->node_name)) {
                error_setg(errp, "The node name '%s' is mapped twice",
                           bmna->node_name);
                goto fail;
            }

            node_map_from = bmna->node_name;
            node_map_to   = bmna->alias;
        } else {
            if (g_hash_table_contains(alias_map, bmna->alias)) {
                error_setg(errp, "The node alias '%s' is used twice",
                           bmna->alias);
                goto fail;
            }

            node_map_from = bmna->alias;
            node_map_to   = bmna->node_name;
        }

        bitmaps_map = g_hash_table_new_full(g_str_hash, g_str_equal,
                                            g_free, g_free);

        amin = g_new(AliasMapInnerNode, 1);
        *amin = (AliasMapInnerNode){
            .string = g_strdup(node_map_to),
            .subtree = bitmaps_map,
        };

        g_hash_table_insert(alias_map, g_strdup(node_map_from), amin);

        for (bmbal = bmna->bitmaps; bmbal; bmbal = bmbal->next) {
            const BitmapMigrationBitmapAlias *bmba = bmbal->value;
            const char *bmap_map_from, *bmap_map_to;

            if (strlen(bmba->alias) > UINT8_MAX) {
                error_setg(errp,
                           "The bitmap alias '%s' is longer than %u bytes",
                           bmba->alias, UINT8_MAX);
                goto fail;
            }

            if (strlen(bmba->name) > BDRV_BITMAP_MAX_NAME_SIZE) {
                error_setg(errp, "The bitmap name '%s' is longer than %d bytes",
                           bmba->name, BDRV_BITMAP_MAX_NAME_SIZE);
                goto fail;
            }

            if (name_to_alias) {
                bmap_map_from = bmba->name;
                bmap_map_to   = bmba->alias;

                if (g_hash_table_contains(bitmaps_map, bmba->name)) {
                    error_setg(errp, "The bitmap '%s'/'%s' is mapped twice",
                               bmna->node_name, bmba->name);
                    goto fail;
                }
            } else {
                bmap_map_from = bmba->alias;
                bmap_map_to   = bmba->name;

                if (g_hash_table_contains(bitmaps_map, bmba->alias)) {
                    error_setg(errp, "The bitmap alias '%s'/'%s' is used twice",
                               bmna->alias, bmba->alias);
                    goto fail;
                }
            }

            g_hash_table_insert(bitmaps_map,
                                g_strdup(bmap_map_from), g_strdup(bmap_map_to));
        }
    }

    return alias_map;

fail:
    g_hash_table_destroy(alias_map);
    return NULL;
}
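
/*
 * For illustration (the node and bitmap names below are hypothetical): given
 * a block-bitmap-mapping migration parameter such as
 *
 *   [ { "node-name": "disk0", "alias": "main-disk",
 *       "bitmaps": [ { "name": "bitmap0", "alias": "main-bitmap" } ] } ]
 *
 * construct_alias_map(bbm, true, ...) yields a table in which looking up
 * "disk0" returns an AliasMapInnerNode whose .string is "main-disk" and whose
 * .subtree maps "bitmap0" -> "main-bitmap". With name_to_alias == false both
 * lookups run in the opposite direction.
 */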

/**
 * Run construct_alias_map() in both directions to check whether @bbm
 * is valid.
 * (This function is to be used by migration/migration.c to validate
 * the user-specified block-bitmap-mapping migration parameter.)
 *
 * Returns true if and only if the mapping is valid.
 */
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp)
{
    GHashTable *alias_map;

    alias_map = construct_alias_map(bbm, true, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    alias_map = construct_alias_map(bbm, false, errp);
    if (!alias_map) {
        return false;
    }
    g_hash_table_destroy(alias_map);

    return true;
}

static uint32_t qemu_get_bitmap_flags(QEMUFile *f)
{
    /* uint32_t, so that the two- and four-byte encodings are not truncated */
    uint32_t flags = qemu_get_byte(f);
    if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
        flags = flags << 8 | qemu_get_byte(f);
        if (flags & DIRTY_BITMAP_MIG_EXTRA_FLAGS) {
            flags = flags << 16 | qemu_get_be16(f);
        }
    }

    return flags;
}

static void qemu_put_bitmap_flags(QEMUFile *f, uint32_t flags)
{
    /* The code currently does not send flags as more than one byte */
    assert(!(flags & (0xffffff00 | DIRTY_BITMAP_MIG_EXTRA_FLAGS)));

    qemu_put_byte(f, flags);
}

static void send_bitmap_header(QEMUFile *f, DBMSaveState *s,
                               SaveBitmapState *dbms, uint32_t additional_flags)
{
    BlockDriverState *bs = dbms->bs;
    BdrvDirtyBitmap *bitmap = dbms->bitmap;
    uint32_t flags = additional_flags;
    trace_send_bitmap_header_enter();

    if (bs != s->prev_bs) {
        s->prev_bs = bs;
        flags |= DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME;
    }

    if (bitmap != s->prev_bitmap) {
        s->prev_bitmap = bitmap;
        flags |= DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME;
    }

    qemu_put_bitmap_flags(f, flags);

    if (flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        qemu_put_counted_string(f, dbms->node_alias);
    }

    if (flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        qemu_put_counted_string(f, dbms->bitmap_alias);
    }
}

static void send_bitmap_start(QEMUFile *f, DBMSaveState *s,
                              SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_START);
    qemu_put_be32(f, bdrv_dirty_bitmap_granularity(dbms->bitmap));
    qemu_put_byte(f, dbms->flags);
}

static void send_bitmap_complete(QEMUFile *f, DBMSaveState *s,
                                 SaveBitmapState *dbms)
{
    send_bitmap_header(f, s, dbms, DIRTY_BITMAP_MIG_FLAG_COMPLETE);
}

static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
                             SaveBitmapState *dbms,
                             uint64_t start_sector, uint32_t nr_sectors)
{
    /* align for buffer_is_zero() */
    uint64_t align = 4 * sizeof(long);
    uint64_t unaligned_size =
        bdrv_dirty_bitmap_serialization_size(
            dbms->bitmap, start_sector << BDRV_SECTOR_BITS,
            (uint64_t)nr_sectors << BDRV_SECTOR_BITS);
    uint64_t buf_size = QEMU_ALIGN_UP(unaligned_size, align);
    uint8_t *buf = g_malloc0(buf_size);
    uint32_t flags = DIRTY_BITMAP_MIG_FLAG_BITS;

    bdrv_dirty_bitmap_serialize_part(
        dbms->bitmap, buf, start_sector << BDRV_SECTOR_BITS,
        (uint64_t)nr_sectors << BDRV_SECTOR_BITS);

    if (buffer_is_zero(buf, buf_size)) {
        g_free(buf);
        buf = NULL;
        flags |= DIRTY_BITMAP_MIG_FLAG_ZEROES;
    }

    trace_send_bitmap_bits(flags, start_sector, nr_sectors, buf_size);

    send_bitmap_header(f, s, dbms, flags);

    qemu_put_be64(f, start_sector);
    qemu_put_be32(f, nr_sectors);

    /*
     * If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration.
     */
    if (flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        qemu_fflush(f);
    } else {
        qemu_put_be64(f, buf_size);
        qemu_put_buffer(f, buf, buf_size);
    }

    g_free(buf);
}
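
/*
 * A note on the alignment above (an observation, not part of the original
 * comments): the buffer is padded to 4 * sizeof(long) bytes, i.e. 32 bytes on
 * an LP64 host, presumably so that buffer_is_zero() can work on whole aligned
 * words. The receiving side, dirty_bitmap_load_bits(), accepts any buf_size
 * between the exact serialization size and that same rounded-up value, which
 * is why both sides must agree on this alignment.
 */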

/* Called with iothread lock taken. */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
    SaveBitmapState *dbms;

    while ((dbms = QSIMPLEQ_FIRST(&s->dbms_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&s->dbms_list, entry);
        bdrv_dirty_bitmap_set_busy(dbms->bitmap, false);
        bdrv_unref(dbms->bs);
        g_free(dbms->node_alias);
        g_free(dbms->bitmap_alias);
        g_free(dbms);
    }
}

/* Called with iothread lock taken. */
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
                               const char *bs_name, GHashTable *alias_map)
{
    BdrvDirtyBitmap *bitmap;
    SaveBitmapState *dbms;
    GHashTable *bitmap_aliases;
    const char *node_alias, *bitmap_name, *bitmap_alias;
    Error *local_err = NULL;

    /* When an alias map is given, @bs_name must be @bs's node name */
    assert(!alias_map || !strcmp(bs_name, bdrv_get_node_name(bs)));

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        if (bdrv_dirty_bitmap_name(bitmap)) {
            break;
        }
    }
    if (!bitmap) {
        return 0;
    }

    bitmap_name = bdrv_dirty_bitmap_name(bitmap);

    if (!bs_name || strcmp(bs_name, "") == 0) {
        error_report("Bitmap '%s' in unnamed node can't be migrated",
                     bitmap_name);
        return -1;
    }

    if (alias_map) {
        const AliasMapInnerNode *amin = g_hash_table_lookup(alias_map, bs_name);

        if (!amin) {
            /* Skip bitmaps on nodes with no alias */
            return 0;
        }

        node_alias = amin->string;
        bitmap_aliases = amin->subtree;
    } else {
        node_alias = bs_name;
        bitmap_aliases = NULL;
    }

    if (node_alias[0] == '#') {
        error_report("Bitmap '%s' in a node with auto-generated "
                     "name '%s' can't be migrated",
                     bitmap_name, node_alias);
        return -1;
    }

    FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
        bitmap_name = bdrv_dirty_bitmap_name(bitmap);
        if (!bitmap_name) {
            continue;
        }

        if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_DEFAULT, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (bitmap_aliases) {
            bitmap_alias = g_hash_table_lookup(bitmap_aliases, bitmap_name);
            if (!bitmap_alias) {
                /* Skip bitmaps with no alias */
                continue;
            }
        } else {
            if (strlen(bitmap_name) > UINT8_MAX) {
                error_report("Cannot migrate bitmap '%s' on node '%s': "
                             "Name is longer than %u bytes",
                             bitmap_name, bs_name, UINT8_MAX);
                return -1;
            }
            bitmap_alias = bitmap_name;
        }

        bdrv_ref(bs);
        bdrv_dirty_bitmap_set_busy(bitmap, true);

        dbms = g_new0(SaveBitmapState, 1);
        dbms->bs = bs;
        dbms->node_alias = g_strdup(node_alias);
        dbms->bitmap_alias = g_strdup(bitmap_alias);
        dbms->bitmap = bitmap;
        dbms->total_sectors = bdrv_nb_sectors(bs);
        dbms->sectors_per_chunk = CHUNK_SIZE * 8 *
            bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS;
        if (bdrv_dirty_bitmap_enabled(bitmap)) {
            dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_ENABLED;
        }
        if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
            dbms->flags |= DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT;
        }

        QSIMPLEQ_INSERT_TAIL(&s->dbms_list, dbms, entry);
    }

    return 0;
}
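
/*
 * A worked example of the sectors_per_chunk computation above (the numbers
 * are illustrative, not a requirement): with CHUNK_SIZE = 1 KiB a chunk
 * carries 8192 bitmap bits, so at a bitmap granularity of 64 KiB one chunk
 * describes 8192 * 64 KiB = 512 MiB of guest data, i.e. 1048576 512-byte
 * sectors.
 */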

/* Called with iothread lock taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
    BlockDriverState *bs;
    SaveBitmapState *dbms;
    GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL);
    BlockBackend *blk;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    GHashTable *alias_map = NULL;

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping, true,
                                        &error_abort);
    }

    s->bulk_completed = false;
    s->prev_bs = NULL;
    s->prev_bitmap = NULL;
    s->no_bitmaps = false;

    if (!alias_map) {
        /*
         * Use the block device name for direct (or filtered) children of
         * named block backends.
         */
        for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
            const char *name = blk_name(blk);

            if (!name || strcmp(name, "") == 0) {
                continue;
            }

            bs = blk_bs(blk);

            /* Skip filters without bitmaps */
            while (bs && bs->drv && bs->drv->is_filter &&
                   !bdrv_has_named_bitmaps(bs))
            {
                bs = bdrv_filter_bs(bs);
            }

            if (bs && bs->drv && !bs->drv->is_filter) {
                if (add_bitmaps_to_list(s, bs, name, NULL)) {
                    goto fail;
                }
                g_hash_table_add(handled_by_blk, bs);
            }
        }
    }

    for (bs = bdrv_next_all_states(NULL); bs; bs = bdrv_next_all_states(bs)) {
        if (g_hash_table_contains(handled_by_blk, bs)) {
            continue;
        }

        if (add_bitmaps_to_list(s, bs, bdrv_get_node_name(bs), alias_map)) {
            goto fail;
        }
    }

    /* unset migration flags here, so that we do not have to roll them back */
    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        bdrv_dirty_bitmap_skip_store(dbms->bitmap, true);
    }

    if (QSIMPLEQ_EMPTY(&s->dbms_list)) {
        s->no_bitmaps = true;
    }

    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }

    return 0;

fail:
    g_hash_table_destroy(handled_by_blk);
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }
    dirty_bitmap_do_save_cleanup(s);

    return -1;
}

/* Called with no lock taken. */
static void bulk_phase_send_chunk(QEMUFile *f, DBMSaveState *s,
                                  SaveBitmapState *dbms)
{
    uint32_t nr_sectors = MIN(dbms->total_sectors - dbms->cur_sector,
                              dbms->sectors_per_chunk);

    send_bitmap_bits(f, s, dbms, dbms->cur_sector, nr_sectors);

    dbms->cur_sector += nr_sectors;
    if (dbms->cur_sector >= dbms->total_sectors) {
        dbms->bulk_completed = true;
    }
}

/* Called with no lock taken. */
static void bulk_phase(QEMUFile *f, DBMSaveState *s, bool limit)
{
    SaveBitmapState *dbms;

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        while (!dbms->bulk_completed) {
            bulk_phase_send_chunk(f, s, dbms);
            if (limit && qemu_file_rate_limit(f)) {
                return;
            }
        }
    }

    s->bulk_completed = true;
}

/* for SaveVMHandlers */
static void dirty_bitmap_save_cleanup(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    dirty_bitmap_do_save_cleanup(s);
}

static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    trace_dirty_bitmap_save_iterate(migration_in_postcopy());

    if (migration_in_postcopy() && !s->bulk_completed) {
        bulk_phase(f, s, true);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    return s->bulk_completed;
}

/* Called with iothread lock taken. */

static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    trace_dirty_bitmap_save_complete_enter();

    if (!s->bulk_completed) {
        bulk_phase(f, s, false);
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_complete(f, s, dbms);
    }

    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    trace_dirty_bitmap_save_complete_finish();

    dirty_bitmap_save_cleanup(opaque);
    return 0;
}

static void dirty_bitmap_save_pending(QEMUFile *f, void *opaque,
                                      uint64_t max_size,
                                      uint64_t *res_precopy_only,
                                      uint64_t *res_compatible,
                                      uint64_t *res_postcopy_only)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms;
    uint64_t pending = 0;

    qemu_mutex_lock_iothread();

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
        uint64_t sectors = dbms->bulk_completed ? 0 :
                           dbms->total_sectors - dbms->cur_sector;

        pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
    }

    qemu_mutex_unlock_iothread();

    trace_dirty_bitmap_save_pending(pending, max_size);

    *res_postcopy_only += pending;
}
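
/*
 * For a sense of scale (example numbers, not taken from the code): if 1 GiB
 * of a disk has not yet been covered by the bulk phase and the bitmap
 * granularity is 64 KiB, the loop above adds 1 GiB / 64 KiB = 16384 to the
 * postcopy-only pending estimate for that bitmap.
 */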

/* First occurrence of this bitmap. It should be created if it doesn't exist */
static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s)
{
    Error *local_err = NULL;
    uint32_t granularity = qemu_get_be32(f);
    uint8_t flags = qemu_get_byte(f);
    LoadBitmapState *b;

    if (s->cancelled) {
        return 0;
    }

    if (s->bitmap) {
        error_report("Bitmap with the same name ('%s') already exists on "
                     "destination", bdrv_dirty_bitmap_name(s->bitmap));
        return -EINVAL;
    } else {
        s->bitmap = bdrv_create_dirty_bitmap(s->bs, granularity,
                                             s->bitmap_name, &local_err);
        if (!s->bitmap) {
            error_report_err(local_err);
            return -EINVAL;
        }
    }

    if (flags & DIRTY_BITMAP_MIG_START_FLAG_RESERVED_MASK) {
        error_report("Unknown flags in migrated dirty bitmap header: %x",
                     flags);
        return -EINVAL;
    }

    if (flags & DIRTY_BITMAP_MIG_START_FLAG_PERSISTENT) {
        bdrv_dirty_bitmap_set_persistence(s->bitmap, true);
    }

    bdrv_disable_dirty_bitmap(s->bitmap);
    if (flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED) {
        bdrv_dirty_bitmap_create_successor(s->bitmap, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
    }

    b = g_new(LoadBitmapState, 1);
    b->bs = s->bs;
    b->bitmap = s->bitmap;
    b->migrated = false;
    b->enabled = flags & DIRTY_BITMAP_MIG_START_FLAG_ENABLED;

    s->bitmaps = g_slist_prepend(s->bitmaps, b);

    return 0;
}

/*
 * before_vm_start_handle_item
 *
 * g_slist_foreach helper
 *
 * item is LoadBitmapState*
 * opaque is DBMLoadState*
 */
static void before_vm_start_handle_item(void *item, void *opaque)
{
    DBMLoadState *s = opaque;
    LoadBitmapState *b = item;

    if (b->enabled) {
        if (b->migrated) {
            bdrv_enable_dirty_bitmap(b->bitmap);
        } else {
            bdrv_dirty_bitmap_enable_successor(b->bitmap);
        }
    }

    if (b->migrated) {
        s->bitmaps = g_slist_remove(s->bitmaps, b);
        g_free(b);
    }
}

void dirty_bitmap_mig_before_vm_start(void)
{
    DBMLoadState *s = &dbm_state.load;
    qemu_mutex_lock(&s->lock);

    assert(!s->before_vm_start_handled);
    g_slist_foreach(s->bitmaps, before_vm_start_handle_item, s);
    s->before_vm_start_handled = true;

    qemu_mutex_unlock(&s->lock);
}

static void cancel_incoming_locked(DBMLoadState *s)
{
    GSList *item;

    if (s->cancelled) {
        return;
    }

    s->cancelled = true;
    s->bs = NULL;
    s->bitmap = NULL;

    /* Drop all unfinished bitmaps */
    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        /*
         * Bitmap must be unfinished, as finished bitmaps should already be
         * removed from the list.
         */
        assert(!s->before_vm_start_handled || !b->migrated);
        if (bdrv_dirty_bitmap_has_successor(b->bitmap)) {
            bdrv_reclaim_dirty_bitmap(b->bitmap, &error_abort);
        }
        bdrv_release_dirty_bitmap(b->bitmap);
    }

    g_slist_free_full(s->bitmaps, g_free);
    s->bitmaps = NULL;
}

void dirty_bitmap_mig_cancel_outgoing(void)
{
    dirty_bitmap_do_save_cleanup(&dbm_state.save);
}

void dirty_bitmap_mig_cancel_incoming(void)
{
    DBMLoadState *s = &dbm_state.load;

    qemu_mutex_lock(&s->lock);

    cancel_incoming_locked(s);

    qemu_mutex_unlock(&s->lock);
}

static void dirty_bitmap_load_complete(QEMUFile *f, DBMLoadState *s)
{
    GSList *item;
    trace_dirty_bitmap_load_complete();

    if (s->cancelled) {
        return;
    }

    bdrv_dirty_bitmap_deserialize_finish(s->bitmap);

    if (bdrv_dirty_bitmap_has_successor(s->bitmap)) {
        bdrv_reclaim_dirty_bitmap(s->bitmap, &error_abort);
    }

    for (item = s->bitmaps; item; item = g_slist_next(item)) {
        LoadBitmapState *b = item->data;

        if (b->bitmap == s->bitmap) {
            b->migrated = true;
            if (s->before_vm_start_handled) {
                s->bitmaps = g_slist_remove(s->bitmaps, b);
                g_free(b);
            }
            break;
        }
    }
}

static int dirty_bitmap_load_bits(QEMUFile *f, DBMLoadState *s)
{
    uint64_t first_byte = qemu_get_be64(f) << BDRV_SECTOR_BITS;
    uint64_t nr_bytes = (uint64_t)qemu_get_be32(f) << BDRV_SECTOR_BITS;
    trace_dirty_bitmap_load_bits_enter(first_byte >> BDRV_SECTOR_BITS,
                                       nr_bytes >> BDRV_SECTOR_BITS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_ZEROES) {
        trace_dirty_bitmap_load_bits_zeroes();
        if (!s->cancelled) {
            bdrv_dirty_bitmap_deserialize_zeroes(s->bitmap, first_byte,
                                                 nr_bytes, false);
        }
    } else {
        size_t ret;
        g_autofree uint8_t *buf = NULL;
        uint64_t buf_size = qemu_get_be64(f);
        uint64_t needed_size;

        /*
         * The actual check for buf_size is done a bit later. We can't do it in
         * cancelled mode as we don't have the bitmap to check the constraints
         * (so, we allocate a buffer and read prior to the check). On the other
         * hand, we shouldn't blindly g_malloc the number from the stream.
         * Actually one chunk should not be larger than CHUNK_SIZE. Let's allow
         * a bit larger (which means that bitmap migration will fail anyway and
         * the whole migration will most probably fail soon due to broken
         * stream).
         */
        if (buf_size > 10 * CHUNK_SIZE) {
            error_report("Bitmap migration stream buffer allocation request "
                         "is too large");
            return -EIO;
        }

        buf = g_malloc(buf_size);
        ret = qemu_get_buffer(f, buf, buf_size);
        if (ret != buf_size) {
            error_report("Failed to read bitmap bits");
            return -EIO;
        }

        if (s->cancelled) {
            return 0;
        }

        needed_size = bdrv_dirty_bitmap_serialization_size(s->bitmap,
                                                           first_byte,
                                                           nr_bytes);

        if (needed_size > buf_size ||
            buf_size > QEMU_ALIGN_UP(needed_size, 4 * sizeof(long))
            /* The same alignment as used in send_bitmap_bits() */
        ) {
            error_report("Migrated bitmap granularity doesn't "
                         "match the destination bitmap '%s' granularity",
                         bdrv_dirty_bitmap_name(s->bitmap));
            cancel_incoming_locked(s);
            return 0;
        }

        bdrv_dirty_bitmap_deserialize_part(s->bitmap, buf, first_byte, nr_bytes,
                                           false);
    }

    return 0;
}

static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s,
                                    GHashTable *alias_map)
{
    GHashTable *bitmap_alias_map = NULL;
    Error *local_err = NULL;
    bool nothing;
    s->flags = qemu_get_bitmap_flags(f);
    trace_dirty_bitmap_load_header(s->flags);

    nothing = s->flags == (s->flags & DIRTY_BITMAP_MIG_FLAG_EOS);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_DEVICE_NAME) {
        if (!qemu_get_counted_string(f, s->node_alias)) {
            error_report("Unable to read node alias string");
            return -EINVAL;
        }

        if (!s->cancelled) {
            if (alias_map) {
                const AliasMapInnerNode *amin;

                amin = g_hash_table_lookup(alias_map, s->node_alias);
                if (!amin) {
                    error_setg(&local_err, "Error: Unknown node alias '%s'",
                               s->node_alias);
                    s->bs = NULL;
                } else {
                    bitmap_alias_map = amin->subtree;
                    s->bs = bdrv_lookup_bs(NULL, amin->string, &local_err);
                }
            } else {
                s->bs = bdrv_lookup_bs(s->node_alias, s->node_alias,
                                       &local_err);
            }
            if (!s->bs) {
                error_report_err(local_err);
                cancel_incoming_locked(s);
            }
        }
    } else if (s->bs) {
        if (alias_map) {
            const AliasMapInnerNode *amin;

            /* Must be present in the map, or s->bs would not be set */
            amin = g_hash_table_lookup(alias_map, s->node_alias);
            assert(amin != NULL);

            bitmap_alias_map = amin->subtree;
        }
    } else if (!nothing && !s->cancelled) {
        error_report("Error: block device name is not set");
        cancel_incoming_locked(s);
    }

    assert(nothing || s->cancelled || !!alias_map == !!bitmap_alias_map);

    if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITMAP_NAME) {
        const char *bitmap_name;

        if (!qemu_get_counted_string(f, s->bitmap_alias)) {
            error_report("Unable to read bitmap alias string");
            return -EINVAL;
        }

        if (!s->cancelled) {
            if (bitmap_alias_map) {
                bitmap_name = g_hash_table_lookup(bitmap_alias_map,
                                                  s->bitmap_alias);
                if (!bitmap_name) {
                    error_report("Error: Unknown bitmap alias '%s' on node "
                                 "'%s' (alias '%s')", s->bitmap_alias,
                                 s->bs->node_name, s->node_alias);
                    cancel_incoming_locked(s);
                }
            } else {
                bitmap_name = s->bitmap_alias;
            }
        }

        if (!s->cancelled) {
            g_strlcpy(s->bitmap_name, bitmap_name, sizeof(s->bitmap_name));
            s->bitmap = bdrv_find_dirty_bitmap(s->bs, s->bitmap_name);

            /*
             * bitmap may be NULL here, it wouldn't be an error if it is the
             * first occurrence of the bitmap
             */
            if (!s->bitmap && !(s->flags & DIRTY_BITMAP_MIG_FLAG_START)) {
                error_report("Error: unknown dirty bitmap "
                             "'%s' for block device '%s'",
                             s->bitmap_name, s->bs->node_name);
                cancel_incoming_locked(s);
            }
        }
    } else if (!s->bitmap && !nothing && !s->cancelled) {
        error_report("Error: bitmap name is not set");
        cancel_incoming_locked(s);
    }

    return 0;
}

/*
 * dirty_bitmap_load
 *
 * Load a sequence of dirty bitmap chunks. Return an error only on fatal I/O
 * stream violations. On other errors just cancel the incoming bitmap
 * migration and return 0.
 *
 * Note that when incoming bitmap migration is canceled, we still must read all
 * our chunks (and just ignore them), to not affect other migration objects.
 */
static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
{
    GHashTable *alias_map = NULL;
    const MigrationParameters *mig_params = &migrate_get_current()->parameters;
    DBMLoadState *s = &((DBMState *)opaque)->load;
    int ret = 0;

    trace_dirty_bitmap_load_enter();

    if (version_id != 1) {
        QEMU_LOCK_GUARD(&s->lock);
        cancel_incoming_locked(s);
        return -EINVAL;
    }

    if (mig_params->has_block_bitmap_mapping) {
        alias_map = construct_alias_map(mig_params->block_bitmap_mapping,
                                        false, &error_abort);
    }

    do {
        QEMU_LOCK_GUARD(&s->lock);

        ret = dirty_bitmap_load_header(f, s, alias_map);
        if (ret < 0) {
            cancel_incoming_locked(s);
            goto fail;
        }

        if (s->flags & DIRTY_BITMAP_MIG_FLAG_START) {
            ret = dirty_bitmap_load_start(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_COMPLETE) {
            dirty_bitmap_load_complete(f, s);
        } else if (s->flags & DIRTY_BITMAP_MIG_FLAG_BITS) {
            ret = dirty_bitmap_load_bits(f, s);
        }

        if (!ret) {
            ret = qemu_file_get_error(f);
        }

        if (ret) {
            cancel_incoming_locked(s);
            goto fail;
        }
    } while (!(s->flags & DIRTY_BITMAP_MIG_FLAG_EOS));

    trace_dirty_bitmap_load_success();
    ret = 0;
fail:
    if (alias_map) {
        g_hash_table_destroy(alias_map);
    }
    return ret;
}

static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;
    SaveBitmapState *dbms = NULL;
    if (init_dirty_bitmap_migration(s) < 0) {
        return -1;
    }

    QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
        send_bitmap_start(f, s, dbms);
    }
    qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS);

    return 0;
}

static bool dirty_bitmap_is_active(void *opaque)
{
    DBMSaveState *s = &((DBMState *)opaque)->save;

    return migrate_dirty_bitmaps() && !s->no_bitmaps;
}

static bool dirty_bitmap_is_active_iterate(void *opaque)
{
    return dirty_bitmap_is_active(opaque) && !runstate_is_running();
}

static bool dirty_bitmap_has_postcopy(void *opaque)
{
    return true;
}

static SaveVMHandlers savevm_dirty_bitmap_handlers = {
    .save_setup = dirty_bitmap_save_setup,
    .save_live_complete_postcopy = dirty_bitmap_save_complete,
    .save_live_complete_precopy = dirty_bitmap_save_complete,
    .has_postcopy = dirty_bitmap_has_postcopy,
    .save_live_pending = dirty_bitmap_save_pending,
    .save_live_iterate = dirty_bitmap_save_iterate,
    .is_active_iterate = dirty_bitmap_is_active_iterate,
    .load_state = dirty_bitmap_load,
    .save_cleanup = dirty_bitmap_save_cleanup,
    .is_active = dirty_bitmap_is_active,
};

void dirty_bitmap_mig_init(void)
{
    QSIMPLEQ_INIT(&dbm_state.save.dbms_list);
    qemu_mutex_init(&dbm_state.load.lock);

    register_savevm_live("dirty-bitmap", 0, 1,
                         &savevm_dirty_bitmap_handlers,
                         &dbm_state);
}