/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "block-migration.h"
#include <assert.h>
#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS          0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100
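
/*
 * Sizing note (derived from block.h, not from this file): with
 * BDRV_SECTOR_BITS == 9 (512-byte sectors) and assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048, each migrated chunk is
 *     2048 << 9 == 1 MiB  (BLOCK_SIZE).
 */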
//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    struct BlkMigDevState *next;
    int64_t cur_sector;
    int64_t total_sectors;
} BlkMigDevState;
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    struct BlkMigBlock *next;
} BlkMigBlock;
typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    int bulk_completed;
    BlkMigDevState *bmds_first;
    BlkMigBlock *first_blk;
    BlkMigBlock *last_blk;
    int submitted;
    int read_done;
    int transferred;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;
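
/*
 * Pipeline invariant (inferred from the functions below): a chunk is
 * counted in exactly one of
 *     submitted   - AIO read issued, completion pending
 *     read_done   - read complete, queued on first_blk/last_blk
 *     transferred - already written to the migration stream
 * blk_mig_read_cb() moves a chunk from submitted to read_done, and
 * flush_blks() moves it from read_done to transferred.
 */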
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    /* insert at the end */
    if (block_mig_state.last_blk == NULL) {
        block_mig_state.first_blk = blk;
        block_mig_state.last_blk = blk;
    } else {
        block_mig_state.last_blk->next = blk;
        block_mig_state.last_blk = blk;
    }

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}
static int mig_read_device_bulk(QEMUFile *f, BlkMigDevState *bms)
{
    int nr_sectors;
    int64_t total_sectors, cur_sector = 0;
    BlockDriverState *bs = bms->bs;
    BlkMigBlock *blk;

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bms->cur_sector;
    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    if (bms->shared_base) {
        /* skip sectors that the shared base image already provides */
        while (cur_sector < bms->total_sectors &&
               !bdrv_is_allocated(bms->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bms->cur_sector = total_sectors;
        qemu_free(blk->buf);
        qemu_free(blk);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    bms->cur_sector = cur_sector + nr_sectors;
    blk->sector = cur_sector;
    blk->bmds = bms;
    blk->next = NULL;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
        qemu_free(blk->buf);
        qemu_free(blk);
        return 0;
    }

    bdrv_reset_dirty(bms->bs, cur_sector, nr_sectors);
    block_mig_state.submitted++;

    return (bms->cur_sector >= total_sectors);
}
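
/*
 * Worked example of the alignment math above (assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048, its value in block.h at the
 * time): cur_sector = 5000 is rounded down by
 *     5000 & ~2047  ->  4096
 * so the AIO read covers sectors [4096, 6144) and the next call
 * resumes from bms->cur_sector == 6144.
 */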
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int len, nr_sectors;
    int64_t total_sectors = bmds->total_sectors, cur_sector = 0;
    uint8_t *tmp_buf = NULL;
    BlockDriverState *bs = bmds->bs;

    tmp_buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bmds->cur_sector;

    if (bmds->shared_base) {
        /* skip sectors that the shared base image already provides */
        while (cur_sector < bmds->total_sectors &&
               !bdrv_is_allocated(bmds->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = total_sectors;
        qemu_free(tmp_buf);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    if (bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);

    /* sector number and flags */
    qemu_put_be64(f, (cur_sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bs->device_name, len);

    qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);

    bmds->cur_sector = cur_sector + BDRV_SECTORS_PER_DIRTY_CHUNK;

    qemu_free(tmp_buf);

    return (bmds->cur_sector >= total_sectors);
}
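
/*
 * Contrast with mig_read_device_bulk() above: this synchronous variant
 * reads with bdrv_read() and writes straight into the stream, while
 * the asynchronous variant queues an AIO read for flush_blks() to
 * drain later. blk_mig_save_bulked_block() selects between them via
 * its is_async argument.
 */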
static void send_blk(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
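
/*
 * On-wire layout of one device block, as produced here and by
 * mig_save_device_bulk() and parsed in block_load() (reconstructed
 * from those functions, not from a separate spec):
 *
 *     8 bytes     be64: (sector << BDRV_SECTOR_BITS) | DEVICE_BLOCK flag
 *     1 byte      device name length
 *     n bytes     device name, no trailing NUL
 *     BLOCK_SIZE  chunk payload
 *
 * The stream is terminated by a lone be64 carrying BLK_MIG_FLAG_EOS.
 */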
static void blk_mig_save_dev_info(QEMUFile *f, BlkMigDevState *bmds)
{
    /* nothing to send here yet: the device name travels with each block */
}
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}
static void init_blk_migration(QEMUFile *f)
{
    BlkMigDevState **pbmds, *bmds;
    BlockDriverState *bs;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->shared_base = block_mig_state.shared_base;

            if (bmds->shared_base) {
                printf("Start migration for %s with shared base image\n",
                       bs->device_name);
            } else {
                printf("Start full migration for %s\n", bs->device_name);
            }

            /* insert at the end */
            pbmds = &block_mig_state.bmds_first;
            while (*pbmds != NULL) {
                pbmds = &(*pbmds)->next;
            }
            *pbmds = bmds;

            blk_mig_save_dev_info(f, bmds);
        }
    }
}
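
/*
 * Only BDRV_TYPE_HD drives are registered for migration; the type
 * check above skips CD-ROM and floppy devices.
 */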
static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        if (bmds->bulk_completed == 0) {
            if (is_async) {
                if (mig_read_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            } else {
                if (mig_save_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            }
            return 1;
        }
    }

    /* reaching here means the bulk phase is complete */
    block_mig_state.bulk_completed = 1;

    return 0;
}
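
/*
 * Return contract (inferred from the callers in block_save_live()):
 * 1 means some device still had bulk work and one chunk was issued;
 * 0 means every device has bulk_completed set, so only the dirty
 * phase remains.
 */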
#define MAX_NUM_BLOCKS 4
static void blk_mig_save_dirty_blocks(QEMUFile *f)
{
    BlkMigDevState *bmds;
    uint8_t *buf;
    int64_t sector;
    int len;

    buf = qemu_malloc(BLOCK_SIZE);

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    /* FIXME: add error handling */
                }

                /* sector number and flags */
                qemu_put_be64(f, (sector << BDRV_SECTOR_BITS)
                                 | BLK_MIG_FLAG_DEVICE_BLOCK);

                /* device name */
                len = strlen(bmds->bs->device_name);
                qemu_put_byte(f, len);
                qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);

                qemu_put_buffer(f, buf, BLOCK_SIZE);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(buf);
}
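
/*
 * Cost note: each dirty chunk resends a full BLOCK_SIZE payload even
 * if a single sector changed, so (under the 1 MiB chunk assumption
 * above) n scattered dirty sectors can cost up to n MiB on the wire.
 */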
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk, *next;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    for (blk = block_mig_state.first_blk;
         blk != NULL && !qemu_file_rate_limit(f);
         blk = next) {
        send_blk(f, blk);

        next = blk->next;
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    block_mig_state.first_blk = blk;

    if (block_mig_state.first_blk == NULL) {
        block_mig_state.last_blk = NULL;
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}
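
/*
 * flush_blks() is the consumer end of the pipeline: the loop is
 * bounded by qemu_file_rate_limit(f), so a slow channel simply leaves
 * completed chunks parked on the read_done list rather than blocking
 * the AIO callbacks.
 */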
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}
static int block_save_live(QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (stage == 3) {
        /* finish the remaining bulk work synchronously */
        while (blk_mig_save_bulked_block(f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(f);

        /* stop tracking dirty blocks */
        set_dirty_tracking(0);

        printf("\nBlock migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}
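
/*
 * Stage protocol sketch (how the live-savevm framework drives this
 * handler; summarized here for orientation):
 *     stage 1 - enumerate disks, start dirty tracking
 *     stage 2 - iterate: push bulk chunks under the rate limit until
 *               is_stage2_completed() reports all reads drained
 *     stage 3 - guest stopped: finish bulk synchronously, then send
 *               every remaining dirty chunk
 */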
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);

            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            if (bs != NULL) {
                bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
            } else {
                printf("Error: unknown block device %s\n", device_name);
                /* FIXME: add error handling */
            }

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            printf("Unknown flags\n");
            /* FIXME: add error handling */
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
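
/*
 * Buffer-sizing note: len is a single byte (0..255), so device_name[256]
 * always has room for the longest possible name plus the NUL appended
 * after qemu_get_buffer().
 */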
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* a shared base implies blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}
void blk_mig_info(void)
{
    BlockDriverState *bs;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        printf("Device %s\n", bs->device_name);
        if (bs->type == BDRV_TYPE_HD) {
            printf("device %s format %s\n",
                   bs->device_name, bs->drv->format_name);
        }
    }
}
void blk_mig_init(void)
{
    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}
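
/*
 * Usage sketch (an assumption about the surrounding code, not enforced
 * here): blk_mig_init() must run once during startup so the "block"
 * savevm section is registered before "migrate -b" triggers
 * block_set_params() and block_save_live().
 */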