/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
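/* With QEMU's 512-byte sectors (BDRV_SECTOR_BITS == 9) each migration chunk
 * is 1 MiB, i.e. BDRV_SECTORS_PER_DIRTY_CHUNK == 2048; all transfers below
 * work in whole chunks of this size. */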

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
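
/* Resulting stream layout per block, decoded again by block_load() below:
 * a big-endian 64-bit word holding the chunk's byte offset ORed with
 * BLK_MIG_FLAG_DEVICE_BLOCK (the offset is sector-aligned, leaving the low
 * bits free for flags), a one-byte device name length, the name itself,
 * and exactly BLOCK_SIZE bytes of data. */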

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
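
/* The aio_bitmap keeps one bit per chunk, packed into unsigned longs: bit
 * (chunk % BITS_PER_LONG) of word (chunk / BITS_PER_LONG).  A set bit means
 * an asynchronous read is in flight for that chunk, which the dirty phase
 * checks before submitting an overlapping request. */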

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
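
/* Sizing note: one bit per BDRV_SECTORS_PER_DIRTY_CHUNK sectors and eight
 * bits per byte gives ceil(total_sectors / (chunk_sectors * 8)) bytes,
 * which the add-divisor-minus-one arithmetic above computes. */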

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}
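
/* Reads one chunk per call, starting at bmds->cur_sector, and returns 1 once
 * the cursor has passed the end of the device.  With a shared base image,
 * extents that are unallocated in the top image (and thus served by the
 * backing file) are skipped rather than transferred. */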

/* Called with iothread lock taken.  */

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable ? BLOCK_SIZE : 0);
    }
}

static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;

    bdrv_iterate(init_blk_migration_it, NULL);
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}
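
/* Note the encoding trick above: the progress percentage travels in the same
 * be64 slot as a sector address, shifted by BDRV_SECTOR_BITS and tagged with
 * BLK_MIG_FLAG_PROGRESS, so the receiver can un-shift it like any address. */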

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}
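
/* Each call transfers at most one dirty chunk (note the break after a hit)
 * and parks its position in bmds->cur_dirty, so repeated calls sweep the
 * device incrementally.  If a bulk-phase read is still in flight for the
 * chunk, bdrv_drain_all() waits for it first so the two never overlap. */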

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    set_dirty_tracking(0);

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    set_dirty_tracking(1);
    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    return qemu_ftell(f) - last_ftell;
}
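
/* The while loop above acts as the bandwidth governor: as long as submitted
 * plus read-but-unsent chunks stay under the rate limit, more reads are
 * queued; flush_blks() then drains completed reads into the stream, itself
 * stopping once qemu_file_rate_limit() trips. */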

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk phase is complete and that
       all asynchronous reads have finished */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}

static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
              block_mig_state.submitted * BLOCK_SIZE +
              block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}
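
/* A worked example of the estimate above (illustrative numbers): with 3 MiB
 * of dirty chunks remaining, 2 reads in flight and 1 block read but unsent,
 * pending = 3 + 2 + 1 = 6 MiB.  The one-block floor during the bulk phase
 * keeps a freshly started migration from reporting zero pending bytes and
 * being treated as converged before any data has moved. */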

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
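
/* The loop above is the receive-side mirror of blk_send(): flags sit in the
 * low bits (below BDRV_SECTOR_MASK) of the be64 word, the address in the
 * rest.  A full BLOCK_SIZE buffer is always consumed from the stream even
 * when nr_sectors was clipped at end of device, matching the sender's
 * fixed-size payload. */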

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}
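
/* blk_enable is set from the migration parameters via block_set_params()
 * above; when it stays 0, is_active reports false and the savevm layer
 * skips these handlers entirely. */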

SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}