/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}
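
/* On-the-wire chunk layout produced by blk_send(): a be64 word holding
 * (sector << BDRV_SECTOR_BITS) | flags, a one-byte device name length,
 * the device name itself, and then BLOCK_SIZE bytes of payload.  The
 * flags live in the bits below BDRV_SECTOR_BITS, so sector number and
 * flags share one word; block_load() undoes this packing.
 */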

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}
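
/* The aio_bitmap helpers below track which chunks of a device currently
 * have an asynchronous read in flight: one bit per
 * BDRV_SECTORS_PER_DIRTY_CHUNK-sized chunk, set when a read is
 * submitted and cleared from blk_mig_read_cb() when it completes.  The
 * dirty phase consults it so a chunk is not re-read while an earlier
 * read of the same chunk is still pending.
 */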

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
                  (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
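
/* The bitmap is sized in whole bytes: one bit per dirty chunk, so the
 * allocation is the device's chunk count divided by 8, rounded up.
 */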

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
                  BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}
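
/* Bulk phase: transfer each device once, front to back.  Each call
 * submits at most one BLOCK_SIZE read, skipping over extents that are
 * unallocated in the local image when a shared base is in use, and
 * returns 1 once the whole device has been submitted.
 */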

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);
}
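
/* Passing the BLOCK_SIZE granularity enables dirty tracking on a
 * device; passing 0 disables it again.
 */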

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable ? BLOCK_SIZE : 0);
    }
}

static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;

    bdrv_iterate(init_blk_migration_it, NULL);
}
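
/* Advance the bulk phase by one step.  Only the first device that has
 * not finished its bulk transfer makes progress on a given call, and a
 * BLK_MIG_FLAG_PROGRESS marker is emitted whenever the overall percent
 * complete changes.  Returns 0 once every device has finished its bulk
 * section.
 */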

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}
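
/* Dirty phase: walk a device from cur_dirty and send the first dirty
 * chunk found.  If an async read for that chunk is still in flight,
 * outstanding requests are drained first.  With is_async the chunk is
 * read through bdrv_aio_readv() and queued for flush_blks(); otherwise
 * (the final stop-and-copy pass) it is read and sent synchronously.
 */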

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}
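
/* Drain completed asynchronous reads onto the wire.  Stops early when
 * the rate limit is hit; the migration lock is dropped around
 * blk_send(), presumably because writing to the stream can yield to
 * the main loop.
 */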

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty << BDRV_SECTOR_BITS;
}

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    set_dirty_tracking(0);

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    init_blk_migration(f);

    /* start tracking dirty blocks */
    set_dirty_tracking(1);

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}
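
/* One iteration of the live phase.  Reads are queued until the data
 * already in flight would exceed the bandwidth limit for this cycle,
 * finishing the bulk phase before dirty chunks are considered; whatever
 * has completed is then flushed.  Returns the number of bytes written
 * by this call.
 */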

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            ret = blk_mig_save_dirty_block(f, 1);
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    return qemu_ftell(f) - last_ftell;
}
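
/* Final pass, run once the VM is stopped: every remaining dirty chunk
 * is sent synchronously, then a 100% progress marker and the EOS flag
 * terminate the block section.
 */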

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        blk_mig_cleanup();
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async reads completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}
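
/* The migration core uses this estimate to decide whether another
 * iteration is needed; reporting a nonzero value during the bulk phase
 * keeps the migration from converging before every block has been sent
 * at least once.
 */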

static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    blk_mig_lock();
    pending = get_remaining_dirty() +
              block_mig_state.submitted * BLOCK_SIZE +
              block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}