/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "block/dirty-bitmap.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#define BLK_MIG_BLOCK_SIZE           (1ULL << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS          0x02
#define BLK_MIG_FLAG_PROGRESS     0x04
#define BLK_MIG_FLAG_ZERO_BLOCK   0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

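/*
 * Rough sketch of the stream layout produced below (see blk_send() and
 * block_load()): each chunk starts with a big-endian 64-bit word packing the
 * chunk's start sector above BDRV_SECTOR_BITS and the BLK_MIG_FLAG_* bits in
 * the low bits.  A DEVICE_BLOCK chunk is followed by a one-byte device-name
 * length, the name itself, and BLK_MIG_BLOCK_SIZE bytes of payload unless
 * BLK_MIG_FLAG_ZERO_BLOCK is set, in which case the payload is omitted.
 * PROGRESS chunks reuse the sector field to carry a percentage, and EOS
 * terminates a section.
 */
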
/* #define DEBUG_BLK_MIGRATION */

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

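/*
 * Worked example of the chunk arithmetic above, assuming 512-byte sectors
 * and a 64-bit host: BDRV_SECTORS_PER_DIRTY_CHUNK is 1 MiB / 512 = 2048, so
 * sector 10240 falls into chunk 10240 / 2048 = 5, which lives in aio_bitmap
 * word 5 / 64 = 0 at bit 5 % 64 = 5.
 */
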
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

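/*
 * For reference, the rounding division above sizes the allocation at one bit
 * per BDRV_SECTORS_PER_DIRTY_CHUNK-sized chunk, rounded up to whole bytes:
 * e.g. a 10 GiB disk (20971520 sectors) has 10240 chunks and therefore gets
 * a 1280-byte aio_bitmap.
 */
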
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    blk_mig_lock();
    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

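/*
 * Note on the three counters updated here and in flush_blks(): a chunk is
 * accounted in "submitted" while its read is in flight, moves to "read_done"
 * once this callback has queued it on blk_list, and finally to "transferred"
 * when flush_blks() has written it to the migration stream.
 */
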
/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

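/*
 * The "cur_sector &= ~(BDRV_SECTORS_PER_DIRTY_CHUNK - 1)" step above rounds
 * the bulk cursor down to a chunk boundary; e.g. with 2048-sector chunks a
 * cursor left at sector 3000 by the allocation scan restarts the read at
 * sector 2048, so every chunk sent stays aligned to BLK_MIG_BLOCK_SIZE.
 */
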
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLK_MIG_BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(qemu_get_aio_context(),
                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            trace_migration_block_init_shared(bdrv_get_device_name(bs));
        } else {
            trace_migration_block_init_full(bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

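/*
 * The PROGRESS record above reuses the sector field of the chunk header:
 * the percentage is shifted left by BDRV_SECTOR_BITS exactly like a sector
 * number, and block_load() recovers it as "addr" when it sees
 * BLK_MIG_FLAG_PROGRESS in the low bits.
 */
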
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
                                         sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE,
                                nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    trace_migration_block_save_device_dirty(sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    trace_migration_block_flush_blks("Enter", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    trace_migration_block_flush_blks("Exit", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("setup", block_mig_state.submitted,
                               block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start track dirty blocks */
    ret = set_dirty_tracking();
    if (ret) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_bytes = qemu_file_total_transferred(f);
    int64_t delta_bytes;

    trace_migration_block_save("iterate", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_bytes = qemu_file_total_transferred(f) - last_bytes;
    if (delta_bytes > 0) {
        return 1;
    } else if (delta_bytes < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("complete", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    trace_migration_block_save_complete();

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated.  */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_state_pending(void *opaque,
                                uint64_t *res_precopy_only,
                                uint64_t *res_compatible,
                                uint64_t *res_postcopy_only)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (!pending && !block_mig_state.bulk_completed) {
        pending = BLK_MIG_BLOCK_SIZE;
    }

    trace_migration_block_state_pending(pending);
    /* We don't do postcopy */
    *res_precopy_only += pending;
}

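/*
 * The estimate above counts the dirty bytes still to be read plus one full
 * BLK_MIG_BLOCK_SIZE for every chunk that is currently submitted or already
 * read but not yet flushed; reporting at least one block while the bulk
 * phase is still running keeps an all-zero estimate from being mistaken for
 * completion before the initial full copy has finished.
 */
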
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLK_MIG_BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & (BDRV_SECTOR_SIZE - 1);
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_activate(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLK_MIG_BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLK_MIG_BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf,
                                         0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: 0x%x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

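/*
 * Design note on the receive path above: a 1 MiB chunk is subdivided into
 * cluster_size pieces (taken from bdrv_get_info() when it evenly divides
 * BLK_MIG_BLOCK_SIZE), so runs of zeroes inside an otherwise non-zero chunk
 * can still be written with blk_pwrite_zeroes() instead of a full data
 * write.
 */
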
static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .state_pending_exact = block_state_pending,
    .state_pending_estimate = block_state_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live("block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}