/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "block/dirty-bitmap.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"
#include "trace.h"
#include "options.h"

#define BLK_MIG_BLOCK_SIZE           (1ULL << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16
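
/*
 * Migration operates on chunks of BLK_MIG_BLOCK_SIZE (1 MiB).  With
 * 512-byte sectors (BDRV_SECTOR_BITS == 9), BDRV_SECTORS_PER_DIRTY_CHUNK
 * works out to (1 << 20) >> 9 == 2048 sectors per chunk; one bit of a
 * device's aio_bitmap tracks one in-flight chunk.
 */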

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock. */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by bdrv_dirty_bitmap_lock().
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}
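
/*
 * On-the-wire layout of one device block, as produced by blk_send() above
 * and consumed by block_load() below:
 *
 *   8 bytes   big-endian (sector << BDRV_SECTOR_BITS) | flags
 *   1 byte    length of the device name
 *   N bytes   device name (not NUL-terminated)
 *   1 MiB     block payload (omitted when BLK_MIG_FLAG_ZERO_BLOCK is set)
 */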

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held. */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bmds->total_sectors) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    int64_t bitmap_size;

    bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
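
/*
 * The allocation above rounds one bit per chunk up to whole bytes.  A
 * worked example: a 10 GiB disk has 20971520 sectors, i.e. 10240 one-MiB
 * chunks, so alloc_aio_bitmap() allocates 20971520 / (2048 * 8) = 1280
 * bytes for its aio_bitmap.
 */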

/* Never hold migration lock when yielding to the main loop! */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /*
     * The migration thread does not have an AioContext. Lock the BQL so that
     * I/O runs in the main loop AioContext (see
     * qemu_get_current_aio_context()).
     */
    qemu_mutex_lock_iothread();
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLK_MIG_BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken. */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(qemu_get_aio_context(),
                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            trace_migration_block_init_shared(bdrv_get_device_name(bs));
        } else {
            trace_migration_block_init_full(bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        bmds = bmds_bs[i].bmds;
        bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken. */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        trace_migration_block_progression(progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken. */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
                                         sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE,
                                nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    trace_migration_block_save_device_dirty(sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    trace_migration_block_flush_blks("Enter", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (migration_rate_exceeded(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    trace_migration_block_flush_blks("Exit", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);
    return ret;
}
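
/*
 * Note that flush_blks() drops the block migration lock around blk_send():
 * writing to the migration stream may block (qemu_put_buffer() can flush
 * to the channel), and the lock must never be held while yielding to the
 * main loop.
 */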

/* Called with iothread lock taken. */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
    }

    return dirty;
}

/* Called with iothread lock taken. */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);

        bs = blk_bs(bmds->blk);
        if (bs) {
            bdrv_op_unblock_all(bs, bmds->blocker);
        }
        error_free(bmds->blocker);
        blk_unref(bmds->blk);
        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken. */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("setup", block_mig_state.submitted,
                               block_mig_state.transferred);

    warn_report("block migration is deprecated;"
                " use blockdev-mirror with NBD instead");

    ret = init_blk_migration(f);
    if (ret < 0) {
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();
    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    uint64_t last_bytes = qemu_file_transferred(f);

    trace_migration_block_save("iterate", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
           migration_rate_get() &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
    return (delta_bytes > 0);
}
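
/*
 * The loop above bounds read-ahead: at most MAX_PARALLEL_IO requests are
 * in flight at once, and in-flight plus completed-but-unsent buffers stay
 * below MAX_IO_BUFFERS, capping buffered data at 512 buffers of
 * BLK_MIG_BLOCK_SIZE, i.e. 512 MiB.
 */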

/* Called with iothread lock taken. */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("complete", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async reads completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    trace_migration_block_save_complete();

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated. */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_state_pending(void *opaque, uint64_t *must_precopy,
                                uint64_t *can_postcopy)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (!pending && !block_mig_state.bulk_completed) {
        pending = BLK_MIG_BLOCK_SIZE;
    }

    trace_migration_block_state_pending(pending);
    /* We don't do postcopy */
    *must_precopy += pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLK_MIG_BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & (BDRV_SECTOR_SIZE - 1);
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_activate(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLK_MIG_BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLK_MIG_BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf,
                                         0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: 0x%x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .state_pending_exact = block_state_pending,
    .state_pending_estimate = block_state_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live("block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
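
/*
 * Usage sketch (not part of this file): this savevm handler only becomes
 * active when block migration is enabled, e.g. via the HMP command
 * "migrate_set_capability block on" (or "migrate -b"); incremental mode
 * corresponds to the "block-incremental" capability checked by
 * migrate_block_incremental() in init_blk_migration() above.
 */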