/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
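/* i.e. 16 in-flight requests of 1 MiB each: a 16 MiB buffer by default */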

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;
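    /*
     * Illustration, assuming a 64 KiB granularity: offset = 10 KiB and
     * bytes = 54 KiB end exactly at 64 KiB, so they cover chunk 0 only
     * and self_nb_chunks == 1; one byte more would round up into chunk 1.
     */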

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                                         uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
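/*
 * Worked example for mirror_cow_align() (illustrative values, assuming a
 * 64 KiB granularity and a 128 KiB target cluster): a request with
 * *offset = 0 and *bytes = 64 KiB whose cluster has not been copied yet is
 * rounded up to 128 KiB, and the function returns the 64 KiB of extra tail
 * that the caller must account as handled.
 */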

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                             &op->qiov, 0);
    }
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
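    /* With the defaults (16 MiB buffer, MAX_IN_FLIGHT == 16), both operands
     * of MAX() are 1 MiB, so max_io_bytes == MAX_IO_BYTES. */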

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_block_status_above(source, NULL, offset,
                                          nb_chunks * s->granularity,
                                          &io_bytes, NULL, NULL);
        }
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                       &target_offset, &target_bytes);
            }
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
    }

fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
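/*
 * For example, with the default 16 MiB buffer and a 64 KiB granularity
 * (the common default for dirty bitmaps), mirror_free_init() carves the
 * buffer into 256 free chunks; mirror_co_read() later pops chunks off
 * buf_free to build each request's I/O vector.
 */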

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         *
         * TODO Pull out the writer lock from bdrv_replace_node() to here
         */
        bdrv_graph_rdlock_main_loop();
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_graph_rdunlock_main_loop();
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop over the image and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
                                          bytes, &count);
        }
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    bdrv_graph_co_rdunlock();
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     * accessing it.
     */
    mirror_top_opaque->job = s;

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            if (s->in_flight == 0 && cnt == 0) {
                trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                          BLOCK_JOB_SLICE_TIME);
                job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
            }
        } else {
            block_job_ratelimit_sleep(&s->common);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job is neither paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_MIRROR,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
        .cancel = mirror_cancel,
    },
    .drained_poll = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size = sizeof(MirrorBlockJob),
        .job_type = JOB_TYPE_COMMIT,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = mirror_run,
        .prepare = mirror_prepare,
        .abort = mirror_abort,
        .pause = mirror_pause,
        .complete = mirror_complete,
        .cancel = commit_active_cancel,
    },
    .drained_poll = mirror_drained_poll,
};

static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note that, because of this, a guest write may contribute nothing
         * to mirror convergence; that is fine, since the background
         * mirroring process keeps running. If, under bad circumstances
         * (high guest I/O load), the background process starves, we will
         * not converge anyway, even if every write contributed, as the
         * guest is not guaranteed to rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }
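    /*
     * Example, assuming a 64 KiB granularity: a guest write covering
     * [10 KiB, 210 KiB) whose head and tail chunks are still dirty is
     * clipped to [64 KiB, 192 KiB), i.e. to the two chunks it covers
     * completely.
     */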

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    job->active_write_bytes_in_flight -= bytes;
    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .is_active_write = true,
        .is_in_flight = true,
        .co = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area. If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to the
     * target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will not
     * conflict with any requests beyond that. For active writes, however, we
     * cannot truncate that area. The request from our parent must be blocked
     * until the area is copied in full. Therefore, we must wait for the whole
     * area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
                         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
                         int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write, but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                              int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name = "mirror_top",
    .bdrv_co_preadv = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush = bdrv_mirror_top_flush,
    .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm = bdrv_mirror_top_child_perm,

    .is_filter = true,
    .filtered_child_is_backing = true,
};

static BlockJob *mirror_start_job(
    const char *job_id, BlockDriverState *bs,
    int creation_flags, BlockDriverState *target,
    const char *replaces, int64_t speed,
    uint32_t granularity, int64_t buf_size,
    BlockMirrorBackingMode backing_mode,
    bool zero_target,
    BlockdevOnError on_source_error,
    BlockdevOnError on_target_error,
    bool unmap,
    BlockCompletionFunc *cb,
    void *opaque,
    const BlockJobDriver *driver,
    bool is_none_mode, BlockDriverState *base,
    bool auto_complete, const char *filter_node_name,
    bool is_mirror, MirrorCopyMode copy_mode,
    Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be an NBD server of the target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other options would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the graph
         * changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
        job_id, bs, creation_flags, base, NULL, speed, 0, 0,
        MIRROR_LEAVE_BACKING_CHAIN, false,
        on_error, on_error, true, cb, opaque,
        &commit_active_job_driver, false, base, auto_complete,
        filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
        errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}