block migration: Report progress also via info migration
[qemu.git] / block-migration.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "monitor.h"
#include "block-migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

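/* Per-device migration state: one of these is kept on bmds_list for
 * every block device taking part in the migration, recording how far
 * the bulk phase has progressed on that device. */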
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;

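/* Serialize one chunk onto the migration stream: a be64 that packs the
 * starting sector with the flag bits, the length-prefixed device name,
 * and then BLOCK_SIZE bytes of payload. */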
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

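/* Progress accounting: byte counts derived from the per-device sector
 * counters, reported to the user via "info migrate". */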
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

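/* Completion callback for the asynchronous bulk reads: queue the block
 * for sending and update the in-flight counters. */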
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

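/* Read the next chunk of a device's bulk phase, asynchronously during
 * stage 2 or synchronously during stage 3.  Returns 1 once the whole
 * device has been scanned, 0 otherwise. */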
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds, int is_async)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        /* skip sectors that are already present in the base image */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;

    if (is_async) {
        blk->iov.iov_base = blk->buf;
        blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

        blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                    nr_sectors, blk_mig_read_cb, blk);
        if (!blk->aiocb) {
            goto error;
        }
        block_mig_state.submitted++;
    } else {
        if (bdrv_read(bs, cur_sector, blk->buf, nr_sectors) < 0) {
            goto error;
        }
        blk_send(f, blk);

        qemu_free(blk->buf);
        qemu_free(blk);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

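/* Reset the global counters and register every hard-disk device with
 * the migration, announcing each one on the monitor. */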
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.print_completion = 0;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->completed_sectors = 0;
            bmds->shared_base = block_mig_state.shared_base;

            block_mig_state.total_sector_sum += bmds->total_sectors;

            if (bmds->shared_base) {
                monitor_printf(mon, "Start migration for %s with shared base "
                                    "image\n",
                               bs->device_name);
            } else {
                monitor_printf(mon, "Start full migration for %s\n",
                               bs->device_name);
            }

            QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
        }
    }
}

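/* Advance the bulk phase by one chunk on the first device that still
 * has unsent sectors and print the overall completion percentage.
 * Returns 0 once every device has finished its bulk phase. */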
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds, is_async) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (completed_sector_sum >= block_mig_state.print_completion) {
        monitor_printf(mon, "Completed %" PRId64 " %%\r",
                       completed_sector_sum * 100 /
                       block_mig_state.total_sector_sum);
        monitor_flush(mon);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    return ret;
}

#define MAX_NUM_BLOCKS 4

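/* Synchronously send every chunk that was dirtied while the bulk phase
 * was running.  Called from stage 3 only, after the migration core has
 * stopped the guest, so the dirty bitmap can no longer change. */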
static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlkMigBlock blk;
    int64_t sector;

    blk.buf = qemu_malloc(BLOCK_SIZE);

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, blk.buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
                                   sector);
                    qemu_file_set_error(f);
                    qemu_free(blk.buf);
                    return;
                }
                blk.bmds = bmds;
                blk.sector = sector;
                blk_send(f, &blk);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(blk.buf);
}

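/* Drain completed reads onto the migration stream, honouring the rate
 * limit and latching any read error into the QEMUFile. */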
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}

static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    set_dirty_tracking(0);

    monitor_printf(mon, "\n");
}

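/* The save_live handler.  Stage 1 initializes the device list and turns
 * on dirty tracking, stage 2 runs the rate-limited asynchronous bulk
 * transfer, and stage 3 drains the remaining bulk and dirty chunks
 * synchronously.  A negative stage means the migration was cancelled. */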
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(mon, f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (stage == 3) {
        while (blk_mig_save_bulked_block(mon, f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(mon, f);
        blk_mig_cleanup(mon);

        if (qemu_file_has_error(f)) {
            return 0;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

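/* The load handler: replay DEVICE_BLOCK records onto the named block
 * devices until the EOS marker is reached. */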
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

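/* Hook block migration into the live savevm machinery at startup. */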
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}