/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "block-migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;

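/* Completion callback for the asynchronous bulk reads issued by
 * mig_read_device_bulk(): records the read result, queues the block on
 * blk_list for transmission by flush_blks(), and updates the in-flight
 * counters. */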
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

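/* Asynchronous bulk transfer of one chunk: skips unallocated sectors when
 * a shared base image is in use, then submits a BLOCK_SIZE read with
 * bdrv_aio_readv().  Returns 1 once the device has been read to the end,
 * 0 otherwise (including on submission failure). */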
static int mig_read_device_bulk(QEMUFile *f, BlkMigDevState *bms)
{
    int nr_sectors;
    int64_t total_sectors, cur_sector = 0;
    BlockDriverState *bs = bms->bs;
    BlkMigBlock *blk;

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bms->cur_sector;
    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    if (bms->shared_base) {
        while (cur_sector < bms->total_sectors &&
               !bdrv_is_allocated(bms->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bms->cur_sector = total_sectors;
        qemu_free(blk->buf);
        qemu_free(blk);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    bms->cur_sector = cur_sector + nr_sectors;
    blk->sector = cur_sector;
    blk->bmds = bms;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    if (!blk->aiocb) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
        qemu_free(blk->buf);
        qemu_free(blk);
        return 0;
    }

    bdrv_reset_dirty(bms->bs, cur_sector, nr_sectors);
    block_mig_state.submitted++;

    return (bms->cur_sector >= total_sectors);
}

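/* Synchronous counterpart of mig_read_device_bulk(): reads one chunk with
 * bdrv_read() and writes it to the migration stream immediately.  Returns
 * 1 once the device has been fully transferred. */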
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int len, nr_sectors;
    int64_t total_sectors = bmds->total_sectors, cur_sector = 0;
    uint8_t *tmp_buf = NULL;
    BlockDriverState *bs = bmds->bs;

    tmp_buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bmds->cur_sector;

    if (bmds->shared_base) {
        while (cur_sector < bmds->total_sectors &&
               !bdrv_is_allocated(bmds->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = total_sectors;
        qemu_free(tmp_buf);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = (total_sectors - cur_sector);
    }

    if (bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
        printf("Error reading sector %" PRId64 "\n", cur_sector);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);

    /* sector number and flags */
    qemu_put_be64(f, (cur_sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bs->device_name, len);

    qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);

    bmds->cur_sector = cur_sector + BDRV_SECTORS_PER_DIRTY_CHUNK;

    qemu_free(tmp_buf);

    return (bmds->cur_sector >= total_sectors);
}

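/* Write one completed block to the migration stream: sector number plus
 * flags, the length-prefixed device name, and the BLOCK_SIZE payload. */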
static void send_blk(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

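/* Enable or disable dirty tracking on every device taking part in the
 * migration. */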
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

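/* Build the list of devices to migrate: every hard disk gets a
 * BlkMigDevState, inheriting the shared_base setting from the global
 * migration state. */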
static void init_blk_migration(QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->shared_base = block_mig_state.shared_base;

            if (bmds->shared_base) {
                printf("Start migration for %s with shared base image\n",
                       bs->device_name);
            } else {
                printf("Start full migration for %s\n", bs->device_name);
            }

            QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
        }
    }
}

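/* Advance the bulk phase by one chunk on the first device that has not
 * completed it yet, asynchronously or synchronously depending on is_async.
 * Returns 1 if a chunk was processed, 0 once all devices have finished
 * their bulk section. */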
static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (is_async) {
                if (mig_read_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            } else {
                if (mig_save_device_bulk(f, bmds) == 1) {
                    /* completed bulk section for this device */
                    bmds->bulk_completed = 1;
                }
            }
            return 1;
        }
    }

    /* reaching here means the bulk phase is completed for all devices */
    return 0;
}

#define MAX_NUM_BLOCKS 4

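/* Final pass: synchronously read and send every chunk that was dirtied
 * while the bulk phase was running, clearing the dirty bits as we go. */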
static void blk_mig_save_dirty_blocks(QEMUFile *f)
{
    BlkMigDevState *bmds;
    uint8_t *buf;
    int64_t sector;
    int len;

    buf = qemu_malloc(BLOCK_SIZE);

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    /* FIXME: add error handling */
                }

                /* sector number and flags */
                qemu_put_be64(f, (sector << BDRV_SECTOR_BITS)
                                 | BLK_MIG_FLAG_DEVICE_BLOCK);

                /* device name */
                len = strlen(bmds->bs->device_name);
                qemu_put_byte(f, len);
                qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);

                qemu_put_buffer(f, buf, BLOCK_SIZE);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(buf);
}

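/* Drain completed reads from blk_list onto the migration stream, stopping
 * early if the rate limit is reached. */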
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        send_blk(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

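/* Stage 2 is complete when no reads are in flight and every device has
 * finished its bulk section. */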
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}

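/* Live save handler.  On stage 1 it builds the device list and starts
 * dirty tracking; on every call it streams completed blocks within the
 * rate limit; on stage 3 it drains the remaining bulk and dirty blocks
 * synchronously and stops dirty tracking.  Every section ends with an
 * EOS marker. */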
static int block_save_live(QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (stage == 3) {
        while (blk_mig_save_bulked_block(f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(f);

        /* stop tracking dirty blocks */
        set_dirty_tracking(0);

        printf("\nBlock migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

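/* Load handler: consumes the stream produced by block_save_live(),
 * writing each received chunk to the block device named in the stream,
 * until the EOS flag is seen. */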
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);

            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            if (bs != NULL) {
                bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
            } else {
                printf("Error unknown block device %s\n", device_name);
                /* FIXME: add error handling */
            }

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            printf("Unknown flags\n");
            /* FIXME: add error handling */
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

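/* SaveVM parameter callback: records whether block migration and a shared
 * base image were requested; using a shared base implies block migration. */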
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

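/* Module initialization: set up the device and block queues and register
 * the live savevm handlers. */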
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}