/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
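
/* Stream format, as written by blk_send() and parsed by block_load():
 * every record starts with a big-endian 64-bit word that carries the flags
 * above in its low BDRV_SECTOR_BITS bits and the sector number (or, for
 * BLK_MIG_FLAG_PROGRESS, the progress percentage) in the remaining bits.
 * DEVICE_BLOCK records append a one-byte device name length, the name and,
 * unless BLK_MIG_FLAG_ZERO_BLOCK is also set, BLOCK_SIZE bytes of data.
 * Each section ends with a BLK_MIG_FLAG_EOS record.
 */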

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

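/* The aio_bitmap tracks which chunks have an AIO read in flight: one bit
 * per BDRV_SECTORS_PER_DIRTY_CHUNK sectors, packed into unsigned longs.
 * The two helpers below test and update individual chunk bits.
 */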

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

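    /* Round up to a whole number of bitmap bytes: one bit per chunk of
     * BDRV_SECTORS_PER_DIRTY_CHUNK sectors, eight chunks per byte.
     */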
    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

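/* Bulk phase: mig_save_device_bulk() submits a single AIO read of one
 * chunk per call, aligned down to a chunk boundary, and returns 1 once
 * the cursor has reached the end of the device.
 */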
/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure as
         * an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

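/* Dirty phase: mig_save_device_dirty() walks the dirty bitmap from
 * bmds->cur_dirty and resends at most one redirtied chunk per call.  If an
 * AIO read of that chunk is still in flight (see aio_bitmap), the device
 * is drained first so the chunk is never read twice concurrently.
 */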
/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            sector += nr_sectors;
            bmds->cur_dirty = sector;

            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

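/* flush_blks() moves reads completed by blk_mig_read_cb() from blk_list
 * out to the stream, stopping early when qemu_file_rate_limit() trips or
 * a failed read is found.  The migration lock is dropped around blk_send().
 */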
/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

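/* block_save_iterate() returns 1 if this pass put data on the stream, 0 if
 * there was nothing to send, and -1 if the stream position moved backwards
 * (delta_ftell < 0).
 */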
/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save is complete and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

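/* The pending estimate above is: dirty bytes still on disk plus BLOCK_SIZE
 * for every chunk already submitted or read but not yet sent.  Keeping
 * pending above max_size while the bulk phase runs stops migration from
 * converging before every sector has been sent at least once.
 */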
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = blk_pwrite(blk, addr * BDRV_SECTOR_SIZE, buf,
                                 nr_sectors * BDRV_SECTOR_SIZE, 0);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }

        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

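/* Registration glue: the savevm layer drives the hooks below, and
 * is_active gates the whole "block" section on blk_enable, which
 * block_set_params() derives from the migration parameters.
 */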
static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
c163b5ca 999}