/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

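/* With 512-byte sectors (BDRV_SECTOR_BITS == 9), one dirty chunk is
 * 2048 sectors, i.e. migration moves disk data in 1 MiB blocks.
 */
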
typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

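/* Lock ordering, outermost first: iothread lock, then any AioContext,
 * then block_mig_state.lock (taken via blk_mig_lock() below).
 */
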
static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

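/* On-the-wire layout of one device block, as written above and parsed
 * again in block_load():
 *
 *   be64:  (sector << BDRV_SECTOR_BITS) | flags
 *   byte:  length of the device name
 *   bytes: device name (not NUL-terminated)
 *   bytes: BLOCK_SIZE of data, omitted when BLK_MIG_FLAG_ZERO_BLOCK is set
 */
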
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

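/* Example, assuming a 64-bit unsigned long: sector 6144 lies in chunk
 * 6144 / 2048 = 3, which is tracked by bit 3 of aio_bitmap[0].
 */
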
/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

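/* The size above is in bytes: each byte of the bitmap covers 8 chunks of
 * BDRV_SECTORS_PER_DIRTY_CHUNK sectors, so this computes
 * ceil(nb_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK * 8)).
 */
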
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

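/* Each block passes through three counters: "submitted" while its AIO
 * read is in flight, "read_done" once the callback above has queued it
 * on blk_list, and "transferred" after flush_blks() has written it to
 * the migration stream.
 */
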
/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

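/* Returns 1 once this device's bulk phase is finished, i.e. cur_sector
 * has reached total_sectors; the caller then marks the device as
 * bulk_completed.
 */
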
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        /* FIXME Use real permissions */
        bmds->blk = blk_new(0, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            blk_insert_bs(bmds->blk, bs, &error_abort);

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

out:
    g_free(bmds_bs);
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

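/* Progress travels in-band on the same stream as data: the percentage
 * is put in the "sector" field of a header flagged with
 * BLK_MIG_FLAG_PROGRESS, with no payload following it.
 */
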
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start track dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

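/* Inflating "pending" past max_size during the bulk phase keeps the
 * migration core from concluding that the remaining data fits in
 * max_downtime before every sector has been sent at least once.
 */
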
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = blk_pwrite(blk, addr * BDRV_SECTOR_SIZE, buf,
                                 nr_sectors * BDRV_SECTOR_SIZE, 0);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}