/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

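/* Block migration moves data in fixed 1 MiB chunks: with 512-byte sectors
 * that is 2048 sectors per chunk, which is also the granularity of the
 * dirty bitmaps created in set_dirty_tracking() below. */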
#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
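/* On the wire, every record starts with a 64-bit value that packs the flags
 * above into the low bits left free by the sector-to-byte shift:
 * (sector << BDRV_SECTOR_BITS) | flags.  block_load() recovers the flags
 * with addr & ~BDRV_SECTOR_MASK. */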

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512
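/* With BLOCK_SIZE buffers, MAX_INFLIGHT_IO bounds the memory held by reads
 * that have been submitted or completed but not yet flushed to the stream
 * (512 * 1 MiB, so at most 512 MiB in flight); see the rate-control loop in
 * block_save_iterate(). */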

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock. */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock. */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

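/* Stream format of one device block record, as written below:
 *   8 bytes      (sector << BDRV_SECTOR_BITS) | flags
 *   1 byte       length of the device name
 *   n bytes      device name (not NUL-terminated)
 *   BLOCK_SIZE bytes of data, omitted when BLK_MIG_FLAG_ZERO_BLOCK is set
 */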
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus if we queue zero blocks we slow down the migration. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held. */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

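/* The aio_bitmap holds one bit per BDRV_SECTORS_PER_DIRTY_CHUNK-sized chunk,
 * set while an asynchronous read of that chunk is in flight.  The dirty pass
 * in mig_save_device_dirty() checks it so it can drain outstanding I/O
 * before reading a chunk again.  The size computed below is one bit per
 * chunk, rounded up to whole bytes. */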
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop! */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; the destination reads their content
         * from the shared base image. */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* We are going to transfer a full block even if it is not allocated. */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */
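/* Each device gets a dirty bitmap with BLOCK_SIZE granularity, so one dirty
 * bit corresponds to one migration chunk of BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors. */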
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken. */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

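/* Progress is reported as (percentage << BDRV_SECTOR_BITS) |
 * BLK_MIG_FLAG_PROGRESS, reusing the addr field of a record;
 * block_load() prints it on the destination. */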
/* Called with no lock taken. */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken. */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: remaining dirty data is small enough for max_downtime
 * < 0: error
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */
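/* Drain the queue of completed reads onto the stream, stopping early when
 * the rate limit is hit or a read failed.  Returns 0 or the first error
 * seen. */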
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken. */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken. */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref. */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

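/* Returns 1 if this pass wrote data to the stream, 0 if it wrote nothing,
 * and -1 on error (the stream position moved backwards). */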
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken. */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk phase is completed and
     * all async reads have completed. */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

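/* Consume records written by the save side until BLK_MIG_FLAG_EOS: device
 * blocks are written (or zeroed) at their sector offset, progress records
 * are printed, and any other flag is an error. */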
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = blk_pwrite(blk, addr * BDRV_SECTOR_SIZE, buf,
                                 nr_sectors * BDRV_SECTOR_SIZE, 0);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base implies that block migration is enabled */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}