/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

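/* Translate a source (read) or target (write) I/O error into the action
 * configured for the job, and drop the synced state so that completion is
 * re-evaluated. */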
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

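/* Finish one copy operation: return its buffer chunks to the free list,
 * clear its chunks in the in-flight bitmap, account for progress, and
 * re-enter the job coroutine if it is waiting for I/O to complete. */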
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

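/* AIO completion callback for writes to the target; a failed range is
 * marked dirty again so that a later iteration retries it. */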
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

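/* AIO completion callback for reads from the source; on success the data
 * is written straight to the target, chaining into mirror_write_complete. */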
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

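/* Clamp *nb_sectors so that the range does not extend past the end of the
 * source device. */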
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

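/* Yield until one in-flight operation completes; mirror_iteration_done
 * re-enters the job coroutine through s->common.co. */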
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

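/* Submit an asynchronous write-zeroes or discard to the target, used for
 * ranges that the source reports as zero or unallocated. */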
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

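/* Copy one batch of consecutive dirty chunks, picking between plain copy,
 * write-zeroes and discard based on the source's block status.  Returns
 * the delay in nanoseconds that the caller should sleep for rate
 * limiting. */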
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

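/* Deferred to the main loop by mirror_run: switch the graph over to the
 * target when completing, drop blockers and references, and report the
 * final return value through block_job_completed. */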
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to, so switch the BB back
         * so the cleanup does the right thing. We don't need any permissions
         * any more now. */
        blk_remove_bs(job->blk);
        blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
        blk_insert_bs(job->blk, src, &error_abort);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    blk_unref(s->target);
    s->target = NULL;
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(src);
}

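/* Yield at least once per SLICE_TIME so that pause, cancel and speed
 * changes stay responsive during the long-running setup loops. */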
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

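/* Prepare a full/top sync: zero out the target if it does not guarantee
 * zero initialization, then mark every extent of the source that is
 * allocated above @base as dirty. */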
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

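/* The job coroutine: size the target, set up buffers and bitmaps, then
 * loop copying dirty data until source and target converge, deferring the
 * final graph change to mirror_exit in the main loop. */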
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *src, *target;

    src = blk_bs(job->blk);
    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target) != backing) {
            bdrv_set_backing_hd(target, backing);
        }
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

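/* Common setup path for drive-mirror and active commit: create the job,
 * attach the target BlockBackend, create the dirty bitmap and start the
 * job coroutine. */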
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete)
{
    MirrorBlockJob *s;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* FIXME Use real permissions */
    s = block_job_create(job_id, driver, bs, 0, BLK_PERM_ALL, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        return;
    }

    /* FIXME Use real permissions */
    s->target = blk_new(0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    /* FIXME Use real permissions */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (bdrv_chain_contains(bs, target)) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* FIXME Use real permissions */
            block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                               BLK_PERM_ALL, &error_abort);
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL, errp,
                     &mirror_job_driver, is_none_mode, base, false);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         BlockCompletionFunc *cb, void *opaque, Error **errp,
                         bool auto_complete)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base, auto_complete);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}