/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "block-migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS          0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

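/* Per-device state for the bulk phase: current position in the image,
   total size in sectors, and whether the bulk copy has finished. */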
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    struct BlkMigDevState *next;
    int64_t cur_sector;
    int64_t total_sectors;
    int64_t dirty;
} BlkMigDevState;

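/* One in-flight block read: the buffer, its source device and sector,
   and the AIO bookkeeping needed to send it once the read completes. */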
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    struct BlkMigBlock *next;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    BlkMigDevState *bmds_first;
    BlkMigBlock *first_blk;
    BlkMigBlock *last_blk;
    int submitted;
    int read_done;
    int transferred;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;

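/* AIO completion callback: appends the finished read to the tail of the
   first_blk/last_blk queue and updates the counters that flush_blks()
   and block_save_live() use for rate limiting. */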
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    /* insert at the end */
    if (block_mig_state.last_blk == NULL) {
        block_mig_state.first_blk = blk;
        block_mig_state.last_blk = blk;
    } else {
        block_mig_state.last_blk->next = blk;
        block_mig_state.last_blk = blk;
    }

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

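/* Asynchronous bulk copy of one dirty-chunk-sized block: skips unallocated
   sectors when migrating on top of a shared base image, aligns down to the
   dirty chunk size, and submits an AIO read that blk_mig_read_cb() will
   queue for sending.  Returns nonzero once this device's bulk phase is
   complete. */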
static int mig_read_device_bulk(QEMUFile *f, BlkMigDevState *bms)
{
    int nr_sectors;
    int64_t total_sectors, cur_sector = 0;
    BlockDriverState *bs = bms->bs;
    BlkMigBlock *blk;

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bms->cur_sector;
    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    if (bms->shared_base) {
        while (cur_sector < bms->total_sectors &&
               !bdrv_is_allocated(bms->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bms->cur_sector = total_sectors;
        qemu_free(blk->buf);
        qemu_free(blk);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    bms->cur_sector = cur_sector + nr_sectors;
    blk->sector = cur_sector;
    blk->bmds = bms;
    blk->next = NULL;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
        qemu_free(blk->buf);
        qemu_free(blk);
        return 0;
    }

    bdrv_reset_dirty(bms->bs, cur_sector, nr_sectors);
    block_mig_state.submitted++;

    return (bms->cur_sector >= total_sectors);
}

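/* Synchronous variant of the bulk copy: reads one chunk with bdrv_read()
   and writes it to the migration stream immediately, instead of going
   through the AIO queue.  Returns nonzero once this device is done. */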
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int len, nr_sectors;
    int64_t total_sectors = bmds->total_sectors, cur_sector = 0;
    uint8_t *tmp_buf = NULL;
    BlockDriverState *bs = bmds->bs;

    tmp_buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bmds->cur_sector;

    if (bmds->shared_base) {
        while (cur_sector < bmds->total_sectors &&
               !bdrv_is_allocated(bmds->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = total_sectors;
        qemu_free(tmp_buf);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    if (bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);

    /* sector number and flags */
    qemu_put_be64(f, (cur_sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bs->device_name, len);

    qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);

    bmds->cur_sector = cur_sector + BDRV_SECTORS_PER_DIRTY_CHUNK;

    qemu_free(tmp_buf);

    return (bmds->cur_sector >= total_sectors);
}

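/* Write one completed block to the migration stream: a be64 word holding
   the byte offset plus flags, the length-prefixed device name, and the
   BLOCK_SIZE payload. */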
static void send_blk(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

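/* Enable or disable dirty-bitmap tracking on every migrated device. */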
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

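/* Build the device list: every BDRV_TYPE_HD drive gets a BlkMigDevState
   appended to block_mig_state.bmds_first. */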
static void init_blk_migration(QEMUFile *f)
{
    BlkMigDevState **pbmds, *bmds;
    BlockDriverState *bs;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->shared_base = block_mig_state.shared_base;

            if (bmds->shared_base) {
                printf("Start migration for %s with shared base image\n",
                       bs->device_name);
            } else {
                printf("Start full migration for %s\n", bs->device_name);
            }

            /* insert at the end */
            pbmds = &block_mig_state.bmds_first;
            while (*pbmds != NULL) {
                pbmds = &(*pbmds)->next;
            }
            *pbmds = bmds;
        }
    }
}

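/* Advance the bulk phase by one block on the first device that still has
   work, using the AIO path when is_async is set.  Returns 0 when the bulk
   phase is complete for all devices. */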
static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        if (bmds->bulk_completed == 0) {
            if (is_async) {
                if (mig_read_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            } else {
                if (mig_save_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            }
            return 1;
        }
    }

    /* reaching here means the bulk phase is complete for all devices */
    return 0;
}

#define MAX_NUM_BLOCKS 4

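/* Final synchronous pass: rescan each device's dirty bitmap up to the
   point the bulk phase reached and resend any chunk the guest wrote to
   in the meantime. */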
static void blk_mig_save_dirty_blocks(QEMUFile *f)
{
    BlkMigDevState *bmds;
    uint8_t *buf;
    int64_t sector;
    int len;

    buf = qemu_malloc(BLOCK_SIZE);

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    /* FIXME: add error handling */
                }

                /* sector number and flags */
                qemu_put_be64(f, (sector << BDRV_SECTOR_BITS)
                                 | BLK_MIG_FLAG_DEVICE_BLOCK);

                /* device name */
                len = strlen(bmds->bs->device_name);
                qemu_put_byte(f, len);
                qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);

                qemu_put_buffer(f, buf, BLOCK_SIZE);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(buf);
}

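/* Drain the queue of completed reads into the migration stream, stopping
   early if the file's rate limit is hit; the queue head/tail pointers are
   fixed up so remaining blocks are sent on the next call. */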
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk, *next;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    for (blk = block_mig_state.first_blk;
         blk != NULL && !qemu_file_rate_limit(f);
         blk = next) {
        send_blk(f, blk);

        next = blk->next;
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    block_mig_state.first_blk = blk;

    if (block_mig_state.first_blk == NULL) {
        block_mig_state.last_blk = NULL;
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

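/* Stage 2 is complete once no AIO reads are in flight and every device
   has finished its bulk phase. */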
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}

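/* Live-savevm handler: stage 1 builds the device list and starts dirty
   tracking, stage 2 feeds bulk blocks within the bandwidth limit, and
   stage 3 drains the remaining bulk and dirty blocks synchronously. */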
static int block_save_live(QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (stage == 3) {
        while (blk_mig_save_bulked_block(f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(f);

        /* stop tracking dirty blocks */
        set_dirty_tracking(0);

        printf("\nBlock migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

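/* Destination side: read (offset|flags, device name, payload) records and
   write each block into the named device until BLK_MIG_FLAG_EOS is seen. */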
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);

            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            if (bs != NULL) {
                bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
            } else {
                printf("Error unknown block device %s\n", device_name);
                /* FIXME: add error handling */
            }

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            printf("Unknown flags\n");
            /* FIXME: add error handling */
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

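/* Record whether block migration was requested and whether the devices
   sit on a shared base image (shared base implies block migration). */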
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

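/* Register the "block" live-savevm handler. */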
void blk_mig_init(void)
{
    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}