/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
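
/* With 512-byte sectors (BDRV_SECTOR_BITS == 9), one dirty chunk is
 * BLOCK_SIZE >> 9 == 2048 sectors, so all data moves in 1 MiB units. */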

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
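
/* Layout of one block on the wire, as produced by blk_send() and parsed
 * by block_load() below:
 *
 *   8 bytes  big-endian: (sector << BDRV_SECTOR_BITS) | flags
 *   1 byte   device name length            (BLK_MIG_FLAG_DEVICE_BLOCK only)
 *   n bytes  device name, not NUL-terminated
 *   1 MiB    block payload, omitted when BLK_MIG_FLAG_ZERO_BLOCK is set
 *
 * BLK_MIG_FLAG_PROGRESS reuses the first field to carry a percentage
 * instead of a sector number.
 */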

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
    Error *blocker;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;
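
/* Resulting lock ordering: the iothread mutex first, then the device's
 * AioContext, and the block migration lock innermost, e.g.:
 *
 *   qemu_mutex_lock_iothread();
 *   aio_context_acquire(bdrv_get_aio_context(bs));
 *   blk_mig_lock();
 *   ...
 *   blk_mig_unlock();
 *   aio_context_release(bdrv_get_aio_context(bs));
 *   qemu_mutex_unlock_iothread();
 */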

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(bdrv_get_device_name(blk->bmds->bs));
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bdrv_nb_sectors(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}
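
/* Example of the indexing above: on a 64-bit host, sector 266240 falls in
 * chunk 266240 / 2048 = 130, which is word 130 / 64 = 2, bit 130 % 64 = 2
 * of the aio_bitmap. */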

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
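
/* The aio_bitmap holds one bit per dirty chunk; the two statements above
 * are a round-up division, so the allocation size in bytes is
 * ceil(nb_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK * 8)).  For a 10 GiB
 * image (20971520 sectors) that is 20971520 / 16384 = 1280 bytes. */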

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(bdrv_get_aio_context(bs));
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(bdrv_get_aio_context(bs));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(bdrv_get_aio_context(bmds->bs));
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(bdrv_get_aio_context(bmds->bs));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}
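
/* Migration proceeds in two phases: a bulk phase that walks each device
 * from sector 0 upward (mig_save_device_bulk above), and a dirty phase
 * that re-sends chunks the guest wrote in the meantime, driven by the
 * dirty bitmaps created below (see mig_save_device_dirty). */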

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        /* anonymous bitmap; failure is detected via the NULL check below */
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL, NULL);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
            aio_context_release(bdrv_get_aio_context(bmds->bs));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }
}

static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            return;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
        /* keep a reference; dropped again in block_migration_cleanup() */
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}
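
/* The dirty phase runs in two modes: while the guest still runs,
 * block_save_iterate() calls the path below with is_async == 1, so reads
 * go through bdrv_aio_readv() and are drained later by flush_blks(); at
 * completion time block_save_complete() passes is_async == 0 and the
 * remaining chunks are read synchronously with bdrv_read(). */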

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain(bmds->bs);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}
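
/* flush_blks() is the consumer half of the read pipeline: blk_mig_read_cb()
 * appends completed reads to blk_list and moves them from "submitted" to
 * "read_done"; the loop above drains that list onto the migration stream
 * (bumping "transferred") until the rate limit kicks in. */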

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->bs can disappear during bdrv_unref.  */
        ctx = bdrv_get_aio_context(bmds->bs);
        aio_context_acquire(ctx);
        bdrv_unref(bmds->bs);
        aio_context_release(ctx);

        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk phase is completed and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}
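
/* The estimate above is: bytes still dirty in the bitmaps, plus one full
 * BLOCK_SIZE for every request in flight or read but not yet sent.
 * Forcing the result above max_size during the bulk phase keeps the
 * migration loop from converging before every sector was sent once. */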

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    BlockBackend *blk;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }
            bs = blk_bs(blk);
            if (!bs) {
                fprintf(stderr, "Block device %s has no medium\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_nb_sectors(bs);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                bdrv_invalidate_cache(bs, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};
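
/* Block migration registers as savevm section "block", version 1.  It
 * stays inactive unless the migration was started with the blk/shared
 * options (see block_set_params and block_is_active above). */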

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}