/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int ret;
} MirrorBlockJob;

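/* A single in-flight copy operation.  op->qiov points at granularity-sized
 * chunks taken from s->buf_free; they are returned to the free list by
 * mirror_iteration_done().
 */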
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

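/* Completion callback common to reads and writes: recycle the op's bounce
 * chunks into s->buf_free, clear the op's bits in in_flight_bitmap, record
 * successfully copied clusters in cow_bitmap, and re-enter the job
 * coroutine so it can make progress.
 */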
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (s->cow_bitmap && ret >= 0) {
        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);
    qemu_coroutine_enter(s->common.co, NULL);
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

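/* Submit one copy operation: pick the next dirty sector from the HBitmap
 * iterator, widen it across adjacent dirty chunks (and, when doing COW
 * ourselves, to whole target clusters), build a QEMUIOVector out of free
 * chunks, and start the asynchronous read; the write is chained in
 * mirror_read_complete().
 */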
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
    } while (next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, s->granularity);

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
}

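/* Carve s->buf into granularity-sized chunks and thread them onto the
 * buf_free list; mirror_iteration() draws its bounce buffers from here.
 */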
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

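/* Yield until every in-flight operation has completed; progress is driven
 * by mirror_iteration_done() re-entering the coroutine as each op finishes.
 */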
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

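/* The body of the mirror job coroutine: populate the dirty bitmap from the
 * backing chain (unless running in "none" mode), then keep copying dirty
 * chunks until the target converges and the job completes or is cancelled.
 */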
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        block_job_completed(&s->common, s->common.len);
        return;
    }

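    /* Both in_flight_bitmap and cow_bitmap are indexed by granularity-sized
     * chunk, so their length is the image length rounded up to the
     * granularity.
     */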
    length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

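    /* Second part: keep copying dirty chunks until the target converges.  */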
    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
                continue;
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
            } else {
                delay_ns = 0;
            }

            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            s->base->backing_hd = NULL;
            bdrv_unref(p);
        }
    }
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        char backing_filename[PATH_MAX];
        bdrv_get_full_backing_filename(s->target, backing_filename,
                                       sizeof(backing_filename));
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name);
        return;
    }

    s->should_complete = true;
    block_job_resume(job);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

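/* Common setup shared by mirror_start() and commit_active_start(): both
 * jobs reuse the MirrorBlockJob machinery and differ only in the driver,
 * the sync mode, and the base image.
 */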
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockDriverCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.  */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity);
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

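/* Entry point for drive-mirror.  "none" mode skips the initial bitmap
 * population, and "top" mode limits the copy to sectors allocated above
 * bs->backing_hd by passing it as the base.
 */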
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, speed, granularity, buf_size,
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

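/* Active commit: mirror the top image into its base.  The base is
 * temporarily reopened read-write (and grown if the top is larger); the
 * original flags are restored if starting the job fails.
 */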
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockDriverCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, speed, 0, 0,
                     on_error, on_error, cb, opaque, errp,
                     &commit_active_job_driver, false, base);
    if (error_is_set(errp)) {
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}