/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME 100000000ULL /* ns */
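/* Maximum number of copy operations the job keeps in flight at once
 * (checked against s->in_flight in mirror_run). */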
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are kept on the buf_free list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

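/* Translate an I/O error into the action chosen by the configured
 * on-source-error/on-target-error policy.  Any error takes the job out
 * of the "synced" state until the target catches up again.
 */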
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

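/* Complete one copy operation: return its buffer chunks to the free list,
 * clear its bits in the in-flight bitmap, account for progress, and wake
 * the job coroutine if it is waiting for resources.
 */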
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping. The coroutine sleeps to
     * rate-limit itself. The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

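/* AIO callback for the write to the target.  On failure, re-dirty the
 * sectors so that a later iteration retries them, and apply the target
 * error policy.
 */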
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num,
                              op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

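/* AIO callback for the read from the source.  On success, chain the
 * asynchronous write to the target; on failure, re-dirty the sectors and
 * apply the source error policy.
 */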
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num,
                              op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

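/* Copy one batch of dirty data: take the next dirty sector from the
 * iterator, coalesce adjacent dirty chunks (rounding out to whole target
 * clusters when we must do COW ourselves), build a QEMUIOVector out of
 * free buffer chunks, and submit the asynchronous read.  Returns the
 * rate-limit delay in nanoseconds, or 0 if the next iteration can run
 * immediately.
 */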
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large. Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely. Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster. Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors. */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(source, s->dirty_bitmap, sector_num,
                            nb_sectors);

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
    return delay_ns;
}

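/* Carve s->buf into granularity-sized chunks and thread them onto the
 * buf_free list.
 */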
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

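/* Yield to the event loop until every in-flight operation has completed;
 * the completion callbacks re-enter the coroutine.
 */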
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

typedef struct {
    int ret;
} MirrorExitData;

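/* Runs in the main loop (via block_job_defer_to_main_loop) once the job
 * coroutine has finished: on successful completion, swap the target into
 * the graph in place of the source (or the explicitly requested node),
 * then release all references and report the job's return value.
 */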
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}

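/* The job coroutine: size the device, set up the in-flight/COW bitmaps and
 * the copy buffer, optionally pre-populate the dirty bitmap from the
 * allocation map (sync modes other than "none"), then loop copying dirty
 * chunks until the job is cancelled or completed.
 */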
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for completion. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(bs, s->dirty_bitmap, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that drain operations such as
         * bdrv_drain_all() can complete. We do so every SLICE_TIME
         * nanoseconds, or when there is an error, or when the source is
         * clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
                if (delay_ns == 0) {
                    continue;
                }
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that the guest can still submit I/O while we drain, so
             * the dirty count must be re-read after the drain.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

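/* speed is given in bytes per second; the rate limiter counts sectors per
 * SLICE_TIME, hence the division by BDRV_SECTOR_SIZE below.
 */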
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

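/* Handle block-job-complete: make sure the target has a backing file to
 * fall back on, optionally look up and pin the node that should be
 * replaced, then tell the coroutine to finish.
 */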
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY,
                  bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_resume(job);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

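/* Common setup shared by drive-mirror and active commit: pick a default
 * granularity from the target cluster size if none was given, create the
 * job and its dirty bitmap, and kick off the mirror_run coroutine.
 */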
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k. */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

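/* Public entry point for drive-mirror. The sync mode decides what is
 * copied up front: "none" starts from an empty dirty bitmap, "top" marks
 * only sectors allocated above the backing file, and "full" marks every
 * sector allocated anywhere in the chain.
 */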
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

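/* Active commit reuses the mirror machinery with the base image as the
 * mirroring target: the base is reopened with the top image's flags and
 * grown if the top image is larger; its original flags are restored on
 * failure.
 */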
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}