/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

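/*
 * Migration granularity: guest disks are carved into 1 MiB chunks.  With
 * the usual 512-byte sectors (BDRV_SECTOR_BITS == 9) each chunk covers
 * (1 << 20) >> 9 == 2048 sectors.
 */
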
#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

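/*
 * These flags travel in the low bits of each record's 64-bit address
 * word: the sector number is shifted up by BDRV_SECTOR_BITS before the
 * flags are ORed in, so the two never overlap.
 */
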
#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

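/*
 * Lock ordering sketch: any path that needs both locks must take the
 * iothread lock first, e.g.
 *
 *     qemu_mutex_lock_iothread();
 *     blk_mig_lock();
 *     ...
 *     blk_mig_unlock();
 *     qemu_mutex_unlock_iothread();
 *
 * as block_save_pending() does below.
 */
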
static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration.
     */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

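/*
 * On-wire layout produced by blk_send() and parsed by block_load():
 *
 *     be64:  (sector << BDRV_SECTOR_BITS) | flags
 *     byte:  device name length
 *     bytes: device name
 *     bytes: BLOCK_SIZE of payload (omitted for zero blocks)
 *
 * Progress records carry the percentage in place of the sector number;
 * EOS records are the flag word alone.
 */
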
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

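/*
 * aio_bitmap keeps one bit per chunk: chunk n lives in word
 * n / (sizeof(unsigned long) * 8) at bit n % (sizeof(unsigned long) * 8).
 * For example, on a 64-bit host sector 1048576 is chunk 512, which maps
 * to word 8, bit 0.
 */
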
/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

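/*
 * Sizing example for the bitmap above: each byte covers 8 chunks of
 * BDRV_SECTORS_PER_DIRTY_CHUNK sectors, so a 10 GiB disk (20971520
 * 512-byte sectors) needs 20971520 / (2048 * 8) = 1280 bytes; adding
 * BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1 first rounds up for disks that
 * are not a whole multiple of that unit.
 */
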
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

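/*
 * Bulk phase: mig_save_device_bulk() walks a device linearly, one chunk
 * per call.  With a shared base image it first skips sectors that
 * bdrv_is_allocated() reports as unallocated in the top image, then it
 * rounds the cursor down to a chunk boundary and submits a single
 * asynchronous read; blk_mig_read_cb() queues the result for
 * flush_blks().  Returns 1 once the cursor has passed the last sector.
 */
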
/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static void set_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE);
    }
}

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
    }
}

static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        bdrv_set_in_use(bs, 1);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    bdrv_iterate(init_blk_migration_it, NULL);
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

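/*
 * Dirty phase: after the bulk pass, mig_save_device_dirty() rescans a
 * device from bmds->cur_dirty and sends at most one dirty chunk per
 * call.  If an AIO read is already in flight for that chunk it drains
 * outstanding requests first.  During iteration (is_async != 0) the
 * chunk is read asynchronously; at completion time it is read
 * synchronously with bdrv_read() and sent inline.
 */
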
/* Called with iothread lock taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

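/*
 * flush_blks() moves completed reads from blk_list onto the wire,
 * stopping early once qemu_file_rate_limit() reports the bandwidth
 * budget as spent.  The migration lock is dropped around blk_send() so
 * that blk_mig_read_cb() is not held up while data is being written out.
 */
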
/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    unset_dirty_tracking();

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        bdrv_unref(bmds->bs);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

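/*
 * SaveVMHandlers lifecycle: block_save_setup() registers the devices and
 * starts dirty tracking, block_save_iterate() runs repeatedly while the
 * guest keeps executing, and block_save_complete() is called with the
 * guest stopped to push the remaining dirty chunks.  Each stage ends its
 * section of the stream with BLK_MIG_FLAG_EOS.
 */
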
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    set_dirty_tracking();
    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    return qemu_ftell(f) - last_ftell;
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save has completed and
     * that all asynchronous reads have finished */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}

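/*
 * The pending estimate below counts bytes still dirty on disk, plus one
 * full BLOCK_SIZE for every submitted-but-incomplete read and for every
 * completed read not yet flushed to the stream.  For example, 3 MiB of
 * dirty sectors with two reads in flight and one queued reports 6 MiB.
 */
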
static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
              block_mig_state.submitted * BLOCK_SIZE +
              block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}

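/*
 * Receiving side of the stream format described above: loop over records
 * until BLK_MIG_FLAG_EOS, resolving the destination device by name and
 * writing one chunk per device-block record.  The device length is only
 * re-read when the record names a different device than the previous one.
 */
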
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}