/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

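/* Note: in every chunk of the migration stream, the BLK_MIG_FLAG_* values
 * travel in the low BDRV_SECTOR_BITS bits of a big-endian 64-bit header
 * word, with the starting sector number in the remaining high bits (see
 * blk_send() and block_load() below).  With the usual 512-byte sectors
 * (BDRV_SECTOR_BITS == 9), one 1 MiB chunk spans
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 sectors.
 */
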
typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

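/* Frame layout written by blk_send() below: the be64 header word (sector
 * number plus flags), a one-byte device-name length, the device name
 * itself, and finally BLOCK_SIZE bytes of data.  Chunks flagged
 * BLK_MIG_FLAG_ZERO_BLOCK carry no data payload.
 */
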
/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

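/* aio_bitmap tracks which chunks currently have an asynchronous read in
 * flight, one bit per BDRV_SECTORS_PER_DIRTY_CHUNK sectors; the dirty
 * phase drains the device before touching such a chunk again.  The sizing
 * below rounds the chunk count up to a whole number of bytes (8 chunks
 * per byte).
 */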
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

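/* Dirty tracking below uses BLOCK_SIZE granularity, so one bit in the
 * dirty bitmap corresponds to exactly one migration chunk.
 */
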
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

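/* Returns 1 while at least one device still has bulk sectors left to
 * send, 0 once the bulk phase has finished on every device.  Progress is
 * reported as a percentage that reuses the sector field of the chunk
 * header word.
 */
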
/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_get_dirty_locked(bs, bmds->dirty_bitmap,
                                  sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

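/* Drain the list of completed asynchronous reads into the migration
 * stream, stopping early when the rate limiter kicks in or when a read
 * has failed.
 */
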
/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}

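/* Split out of block_migration_cleanup() so that block_save_complete()
 * can release the BlockBackends early as well (see the comment there).
 */
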
/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

706/* Called with iothread lock taken. */
707static void block_migration_cleanup(void *opaque)
708{
709 BlkMigBlock *blk;
710
711 bdrv_drain_all();
712
713 block_migration_cleanup_bmds();
4ec7fcc7 714
ef0716df 715 blk_mig_lock();
82801d8f
JK
716 while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
717 QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
7267c094
AL
718 g_free(blk->buf);
719 g_free(blk);
4ec7fcc7 720 }
52e850de 721 blk_mig_unlock();
4ec7fcc7
JK
722}
723
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

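/* Each iteration keeps submitting reads as long as the rate limiter still
 * has budget, at most MAX_PARALLEL_IO reads are in flight, and no more
 * than MAX_IO_BUFFERS read buffers are held in total.
 */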
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save is completed and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated. */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *res_precopy_only,
                               uint64_t *res_compatible,
                               uint64_t *res_postcopy_only)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *res_precopy_only += pending;
}

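/* Destination side: parse the stream produced by blk_send() and write
 * each chunk to the named block device.  All-zero clusters are written
 * with blk_pwrite_zeroes() so the destination can keep them sparse.
 */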
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                        cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

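/* Wire the functions above into the live-migration framework.
 * block_is_active() gates the whole machinery on the block-migration
 * capability (enabled e.g. with the HMP 'migrate -b' flag).
 */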
static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}