/*
 * block-migration.c (from qemu.git; gitweb extraction header removed)
 * Commit subject: "block migration: Drop dead code"
 */
1 /*
2 * QEMU live block migration
3 *
4 * Copyright IBM, Corp. 2009
5 *
6 * Authors:
7 * Liran Schour <lirans@il.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu-common.h"
15 #include "block_int.h"
16 #include "hw/hw.h"
17 #include "block-migration.h"
18 #include <assert.h>
19
20 #define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)
21
22 #define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
23 #define BLK_MIG_FLAG_EOS 0x02
24
25 #define MAX_IS_ALLOCATED_SEARCH 65536
26 #define MAX_BLOCKS_READ 10000
27 #define BLOCKS_READ_CHANGE 100
28 #define INITIAL_BLOCKS_READ 100
29
30 //#define DEBUG_BLK_MIGRATION
31
32 #ifdef DEBUG_BLK_MIGRATION
33 #define dprintf(fmt, ...) \
34 do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
35 #else
36 #define dprintf(fmt, ...) \
37 do { } while (0)
38 #endif
39
/* Per-device migration state; devices are kept on a singly linked list
   headed by block_mig_state.bmds_first. */
typedef struct BlkMigDevState {
    BlockDriverState *bs;        /* the block device being migrated */
    int bulk_completed;          /* non-zero once the bulk phase is done */
    int shared_base;             /* skip sectors not allocated in overlay */
    struct BlkMigDevState *next; /* next device in the list */
    int64_t cur_sector;          /* next sector to transfer in bulk phase */
    int64_t total_sectors;       /* device length in sectors, set at init */
    int64_t dirty;               /* NOTE(review): never referenced in this
                                    file — looks like dead state; confirm
                                    before removing */
} BlkMigDevState;
49
/* One chunk read: in flight while aiocb is pending, then queued on the
   first_blk/last_blk list until flush_blks() transmits it. */
typedef struct BlkMigBlock {
    uint8_t *buf;             /* BLOCK_SIZE payload buffer */
    BlkMigDevState *bmds;     /* owning device */
    int64_t sector;           /* first sector covered by this chunk */
    struct iovec iov;         /* single-element iovec over buf */
    QEMUIOVector qiov;        /* wraps iov for bdrv_aio_readv() */
    BlockDriverAIOCB *aiocb;  /* handle of the pending AIO read */
    int ret;                  /* AIO completion status (from callback) */
    struct BlkMigBlock *next; /* next block in the completed-read queue */
} BlkMigBlock;
60
/* Global state for the (single) block migration in progress. */
typedef struct BlkMigState {
    int blk_enable;             /* non-zero when storage migration requested */
    int shared_base;            /* migrate on top of a shared base image */
    BlkMigDevState *bmds_first; /* head of the per-device list */
    BlkMigBlock *first_blk;     /* head of the completed-read queue */
    BlkMigBlock *last_blk;      /* tail of the completed-read queue */
    int submitted;              /* AIO reads currently in flight */
    int read_done;              /* completed reads waiting to be sent */
    int transferred;            /* blocks already written to the stream */
    int64_t print_completion;   /* next progress-print threshold (sectors) */
} BlkMigState;

static BlkMigState block_mig_state;
74
75 static void blk_mig_read_cb(void *opaque, int ret)
76 {
77 BlkMigBlock *blk = opaque;
78
79 blk->ret = ret;
80
81 /* insert at the end */
82 if (block_mig_state.last_blk == NULL) {
83 block_mig_state.first_blk = blk;
84 block_mig_state.last_blk = blk;
85 } else {
86 block_mig_state.last_blk->next = blk;
87 block_mig_state.last_blk = blk;
88 }
89
90 block_mig_state.submitted--;
91 block_mig_state.read_done++;
92 assert(block_mig_state.submitted >= 0);
93 }
94
/*
 * Submit one asynchronous bulk read of up to BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors for device @bms.  The chunk is queued for transmission by
 * blk_mig_read_cb() when the read completes; flush_blks() later sends it.
 *
 * Returns 1 when the device's bulk phase is finished (cur_sector reached
 * the end of the device), 0 otherwise.  @f is unused here but keeps the
 * signature parallel to mig_save_device_bulk().
 *
 * NOTE(review): the loop below bounds on bms->total_sectors while the
 * later checks use the freshly recomputed local total_sectors — these
 * should be the same value (device length is fixed during migration),
 * but the duplication is worth confirming/unifying.
 */
static int mig_read_device_bulk(QEMUFile *f, BlkMigDevState *bms)
{
    int nr_sectors;
    int64_t total_sectors, cur_sector = 0;
    BlockDriverState *bs = bms->bs;
    BlkMigBlock *blk;

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);

    cur_sector = bms->cur_sector;
    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    if (bms->shared_base) {
        /* With a shared base, skip runs of sectors that are unallocated
           in the overlay — the destination reads them from its own base. */
        while (cur_sector < bms->total_sectors &&
               !bdrv_is_allocated(bms->bs, cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        /* Device done: clamp and release the unused block. */
        bms->cur_sector = total_sectors;
        qemu_free(blk->buf);
        qemu_free(blk);
        return 1;
    }

    if (cur_sector >= block_mig_state.print_completion) {
        printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
        fflush(stdout);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    /* Align down to a dirty-chunk boundary so chunks never straddle
       dirty-bitmap granules. */
    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        /* final, possibly short, chunk of the device */
        nr_sectors = (total_sectors - cur_sector);
    }

    bms->cur_sector = cur_sector + nr_sectors;
    blk->sector = cur_sector;
    blk->bmds = bms;
    blk->next = NULL;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    if (!blk->aiocb) {
        /* Submission failed: free the block and report "not finished"
           so the caller retries this sector range later. */
        printf("Error reading sector %" PRId64 "\n", cur_sector);
        qemu_free(blk->buf);
        qemu_free(blk);
        return 0;
    }

    /* Clear dirty bits now; any write after this point re-dirties the
       chunk and it will be resent in the dirty phase. */
    bdrv_reset_dirty(bms->bs, cur_sector, nr_sectors);
    block_mig_state.submitted++;

    return (bms->cur_sector >= total_sectors);
}
163
164 static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
165 {
166 int len, nr_sectors;
167 int64_t total_sectors = bmds->total_sectors, cur_sector = 0;
168 uint8_t *tmp_buf = NULL;
169 BlockDriverState *bs = bmds->bs;
170
171 tmp_buf = qemu_malloc(BLOCK_SIZE);
172
173 cur_sector = bmds->cur_sector;
174
175 if (bmds->shared_base) {
176 while (cur_sector < bmds->total_sectors &&
177 !bdrv_is_allocated(bmds->bs, cur_sector,
178 MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
179 cur_sector += nr_sectors;
180 }
181 }
182
183 if (cur_sector >= total_sectors) {
184 bmds->cur_sector = total_sectors;
185 qemu_free(tmp_buf);
186 return 1;
187 }
188
189 if (cur_sector >= block_mig_state.print_completion) {
190 printf("Completed %" PRId64 " %%\r", cur_sector * 100 / total_sectors);
191 fflush(stdout);
192 block_mig_state.print_completion +=
193 (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
194 }
195
196 cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
197
198 /* we are going to transfer a full block even if it is not allocated */
199 nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
200
201 if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
202 nr_sectors = (total_sectors - cur_sector);
203 }
204
205 if (bdrv_read(bs, cur_sector, tmp_buf, nr_sectors) < 0) {
206 printf("Error reading sector %" PRId64 "\n", cur_sector);
207 }
208
209 bdrv_reset_dirty(bs, cur_sector, nr_sectors);
210
211 /* sector number and flags */
212 qemu_put_be64(f, (cur_sector << BDRV_SECTOR_BITS)
213 | BLK_MIG_FLAG_DEVICE_BLOCK);
214
215 /* device name */
216 len = strlen(bs->device_name);
217 qemu_put_byte(f, len);
218 qemu_put_buffer(f, (uint8_t *)bs->device_name, len);
219
220 qemu_put_buffer(f, tmp_buf, BLOCK_SIZE);
221
222 bmds->cur_sector = cur_sector + BDRV_SECTORS_PER_DIRTY_CHUNK;
223
224 qemu_free(tmp_buf);
225
226 return (bmds->cur_sector >= total_sectors);
227 }
228
229 static void send_blk(QEMUFile *f, BlkMigBlock * blk)
230 {
231 int len;
232
233 /* sector number and flags */
234 qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
235 | BLK_MIG_FLAG_DEVICE_BLOCK);
236
237 /* device name */
238 len = strlen(blk->bmds->bs->device_name);
239 qemu_put_byte(f, len);
240 qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);
241
242 qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
243 }
244
245 static void set_dirty_tracking(int enable)
246 {
247 BlkMigDevState *bmds;
248 for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
249 bdrv_set_dirty_tracking(bmds->bs, enable);
250 }
251 }
252
253 static void init_blk_migration(QEMUFile *f)
254 {
255 BlkMigDevState **pbmds, *bmds;
256 BlockDriverState *bs;
257
258 for (bs = bdrv_first; bs != NULL; bs = bs->next) {
259 if (bs->type == BDRV_TYPE_HD) {
260 bmds = qemu_mallocz(sizeof(BlkMigDevState));
261 bmds->bs = bs;
262 bmds->bulk_completed = 0;
263 bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
264 bmds->shared_base = block_mig_state.shared_base;
265
266 if (bmds->shared_base) {
267 printf("Start migration for %s with shared base image\n",
268 bs->device_name);
269 } else {
270 printf("Start full migration for %s\n", bs->device_name);
271 }
272
273 /* insert at the end */
274 pbmds = &block_mig_state.bmds_first;
275 while (*pbmds != NULL) {
276 pbmds = &(*pbmds)->next;
277 }
278 *pbmds = bmds;
279 }
280 }
281 }
282
283 static int blk_mig_save_bulked_block(QEMUFile *f, int is_async)
284 {
285 BlkMigDevState *bmds;
286
287 for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
288 if (bmds->bulk_completed == 0) {
289 if (is_async) {
290 if (mig_read_device_bulk(f, bmds) == 1) {
291 /* completed bulk section for this device */
292 bmds->bulk_completed = 1;
293 }
294 } else {
295 if (mig_save_device_bulk(f, bmds) == 1) {
296 /* completed bulk section for this device */
297 bmds->bulk_completed = 1;
298 }
299 }
300 return 1;
301 }
302 }
303
304 /* we reached here means bulk is completed */
305 return 0;
306 }
307
308 #define MAX_NUM_BLOCKS 4
309
310 static void blk_mig_save_dirty_blocks(QEMUFile *f)
311 {
312 BlkMigDevState *bmds;
313 uint8_t *buf;
314 int64_t sector;
315 int len;
316
317 buf = qemu_malloc(BLOCK_SIZE);
318
319 for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
320 for (sector = 0; sector < bmds->cur_sector;) {
321 if (bdrv_get_dirty(bmds->bs, sector)) {
322 if (bdrv_read(bmds->bs, sector, buf,
323 BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
324 /* FIXME: add error handling */
325 }
326
327 /* sector number and flags */
328 qemu_put_be64(f, (sector << BDRV_SECTOR_BITS)
329 | BLK_MIG_FLAG_DEVICE_BLOCK);
330
331 /* device name */
332 len = strlen(bmds->bs->device_name);
333 qemu_put_byte(f, len);
334 qemu_put_buffer(f, (uint8_t *)bmds->bs->device_name, len);
335
336 qemu_put_buffer(f, buf, BLOCK_SIZE);
337
338 bdrv_reset_dirty(bmds->bs, sector,
339 BDRV_SECTORS_PER_DIRTY_CHUNK);
340 }
341 sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
342 }
343 }
344
345 qemu_free(buf);
346 }
347
348 static void flush_blks(QEMUFile* f)
349 {
350 BlkMigBlock *blk, *next;
351
352 dprintf("%s Enter submitted %d read_done %d transferred %d\n",
353 __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
354 block_mig_state.transferred);
355
356 for (blk = block_mig_state.first_blk;
357 blk != NULL && !qemu_file_rate_limit(f);
358 blk = next) {
359 send_blk(f, blk);
360
361 next = blk->next;
362 qemu_free(blk->buf);
363 qemu_free(blk);
364
365 block_mig_state.read_done--;
366 block_mig_state.transferred++;
367 assert(block_mig_state.read_done >= 0);
368 }
369 block_mig_state.first_blk = blk;
370
371 if (block_mig_state.first_blk == NULL) {
372 block_mig_state.last_blk = NULL;
373 }
374
375 dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
376 block_mig_state.submitted, block_mig_state.read_done,
377 block_mig_state.transferred);
378 }
379
380 static int is_stage2_completed(void)
381 {
382 BlkMigDevState *bmds;
383
384 if (block_mig_state.submitted > 0) {
385 return 0;
386 }
387
388 for (bmds = block_mig_state.bmds_first; bmds != NULL; bmds = bmds->next) {
389 if (bmds->bulk_completed == 0) {
390 return 0;
391 }
392 }
393
394 return 1;
395 }
396
/*
 * Live-savevm handler for the "block" section.
 *
 * stage 1: register devices and start dirty tracking.
 * stage 2: iterative phase — flush completed reads, submit more bulk
 *          reads while under the stream's rate limit, flush again.
 * stage 3: guest stopped — finish the bulk phase synchronously, send
 *          all remaining dirty chunks, stop dirty tracking.
 *
 * Every call ends the section with a BLK_MIG_FLAG_EOS marker.  Returns
 * non-zero when this section needs no further stage-2 iterations.
 */
static int block_save_live(QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(f);

        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    /* Send whatever completed before this call. */
    flush_blks(f);

    /* control the rate of transfer: keep submitting async bulk reads
       while in-flight + queued data stays under the rate limit */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    /* Send reads that completed while we were submitting. */
    flush_blks(f);

    if (stage == 3) {
        /* Guest is stopped: drain the bulk phase synchronously ... */
        while (blk_mig_save_bulked_block(f, 0) != 0) {
            /* empty */
        }

        /* ... then resend everything dirtied since its bulk transfer. */
        blk_mig_save_dirty_blocks(f);

        /* stop track dirty blocks */
        set_dirty_tracking(0);

        printf("\nBlock migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}
446
447 static int block_load(QEMUFile *f, void *opaque, int version_id)
448 {
449 int len, flags;
450 char device_name[256];
451 int64_t addr;
452 BlockDriverState *bs;
453 uint8_t *buf;
454
455 do {
456 addr = qemu_get_be64(f);
457
458 flags = addr & ~BDRV_SECTOR_MASK;
459 addr >>= BDRV_SECTOR_BITS;
460
461 if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
462 /* get device name */
463 len = qemu_get_byte(f);
464
465 qemu_get_buffer(f, (uint8_t *)device_name, len);
466 device_name[len] = '\0';
467
468 bs = bdrv_find(device_name);
469
470 buf = qemu_malloc(BLOCK_SIZE);
471
472 qemu_get_buffer(f, buf, BLOCK_SIZE);
473 if (bs != NULL) {
474 bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
475 } else {
476 printf("Error unknown block device %s\n", device_name);
477 /* FIXME: add error handling */
478 }
479
480 qemu_free(buf);
481 } else if (!(flags & BLK_MIG_FLAG_EOS)) {
482 printf("Unknown flags\n");
483 /* FIXME: add error handling */
484 }
485 } while (!(flags & BLK_MIG_FLAG_EOS));
486
487 return 0;
488 }
489
490 static void block_set_params(int blk_enable, int shared_base, void *opaque)
491 {
492 block_mig_state.blk_enable = blk_enable;
493 block_mig_state.shared_base = shared_base;
494
495 /* shared base means that blk_enable = 1 */
496 block_mig_state.blk_enable |= shared_base;
497 }
498
499 void blk_mig_init(void)
500 {
501 register_savevm_live("block", 0, 1, block_set_params, block_save_live,
502 NULL, block_load, &block_mig_state);
503 }