/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>

#define BLOCK_SIZE                      (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK    (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
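
/* The flags above travel in the low bits of the 64-bit word that heads
 * each record on the wire: chunk addresses are sector-aligned, so the
 * bits below BDRV_SECTOR_BITS are always zero and are free to carry the
 * BLK_MIG_FLAG_* values (see blk_send() and block_load() below).
 */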

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

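/* Send one chunk to the stream.  The on-wire format, mirrored by
 * block_load() on the destination, is: a 64-bit word holding
 * (sector << BDRV_SECTOR_BITS) | flags, one length byte, the device
 * name, and then BLOCK_SIZE bytes of payload (omitted entirely when the
 * chunk is all zeroes and BLK_MIG_FLAG_ZERO_BLOCK is set).
 */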
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus if we queue zero blocks we slow down the migration.
     */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

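/* Each bit in aio_bitmap covers one BDRV_SECTORS_PER_DIRTY_CHUNK-sized
 * chunk and is set while an asynchronous read of that chunk is in
 * flight, so the dirty pass can avoid re-reading sectors that are
 * already being transferred.
 */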
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

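/* Read the next bulk chunk of one device and queue it for sending.
 * With a shared base image, chunks that bdrv_is_allocated() reports as
 * unallocated in the local image are skipped.  Returns 1 once the whole
 * device has been submitted, 0 otherwise.
 */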
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable ? BLOCK_SIZE : 0);
    }
}

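/* bdrv_iterate() callback: register one block device for migration.
 * Read-only and zero-length devices are skipped; everything else gets a
 * BlkMigDevState, an in-flight bitmap, and a reference so the device
 * cannot go away mid-migration.
 */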
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        bdrv_set_in_use(bs, 1);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    bdrv_iterate(init_blk_migration_it, NULL);
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken.  */

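/* Send one dirty chunk of a device.  In the asynchronous case the read
 * is submitted via bdrv_aio_readv() and completed chunks are later
 * drained by flush_blks(); in the synchronous case (final stage) the
 * chunk is read and sent inline.  Returns 1 when the dirty scan for
 * this device has finished, 0 when a chunk was handled, negative on a
 * read error.
 */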
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 * < 0: error
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

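/* Drain completed asynchronous reads from blk_list into the stream,
 * stopping early if the rate limit is hit or a read failed.  Frees each
 * block after it has been sent.
 */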
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    set_dirty_tracking(0);

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        bdrv_unref(bmds->bs);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    set_dirty_tracking(1);
    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity; block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    return qemu_ftell(f) - last_ftell;
}

/* Called with iothread lock taken.  */

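/* Final stage: by the time this runs the bulk phase is over and all
 * asynchronous reads have completed (asserted below), so the remaining
 * dirty chunks are sent synchronously, followed by a 100% progress
 * marker and EOS.
 */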
static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save is complete and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}

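/* Estimate how many bytes are still to be sent: the dirty byte count
 * plus everything submitted or read but not yet flushed to the stream.
 * Reporting a non-zero value during the bulk phase keeps migration from
 * converging before the bulk pass has finished.
 */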
static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
                       block_mig_state.submitted * BLOCK_SIZE +
                       block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}

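/* Destination side: parse the stream written by blk_send() and
 * blk_mig_save_bulked_block().  Device-block chunks are written to the
 * named device (zero chunks via bdrv_write_zeroes()), progress markers
 * are printed, and the loop ends at the EOS flag.
 */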
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

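/* Registers the "block" savevm section at startup.  The handlers above
 * stay dormant unless a migration is started with block migration
 * enabled (see block_is_active() above).
 */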
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
c163b5ca 848}