/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

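/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the free list stores its links *inside* the buffer memory
 * itself.  While a granularity-sized chunk is unused, its first bytes
 * hold the QSIMPLEQ entry; once the chunk is handed out for I/O, the
 * whole chunk is plain buffer again (see mirror_free_init() and
 * mirror_co_read()).  A minimal stand-alone version of the idea, with a
 * hand-rolled list instead of QSIMPLEQ and assuming buf_size is a
 * multiple of granularity:
 */
#if 0
typedef struct FreeChunk {
    struct FreeChunk *next;   /* overlays the first bytes of a free chunk */
} FreeChunk;

static FreeChunk *chunk_freelist_init(uint8_t *buf, size_t buf_size,
                                      size_t granularity)
{
    FreeChunk *head = NULL;

    /* Walk backwards so the list comes out in ascending address order */
    for (size_t off = buf_size; off >= granularity; off -= granularity) {
        FreeChunk *c = (FreeChunk *)(buf + off - granularity);
        c->next = head;
        head = c;
    }
    return head;
}
#endif
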
typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    /*
     * To be accessed with atomics. Written only under the BQL (required by the
     * current implementation of mirror_change()).
     */
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /*
     * To be accessed with atomics.
     *
     * Set when the target is synced (dirty bitmap is clean, nothing in flight)
     * and the job is running in active mode.
     */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    qatomic_set(&s->actively_synced, false);
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

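/*
 * Worked example (illustrative, not part of the original file): with a
 * 64 KiB granularity, a request for [offset = 96 KiB, bytes = 96 KiB)
 * spans chunks 1..2, since self_start_chunk = 96K / 64K = 1 and
 * self_end_chunk = DIV_ROUND_UP(96K + 96K, 64K) = 3.  An in-flight op
 * covering chunks 3..4 does not overlap and is skipped; one covering
 * chunk 2 conflicts, so the caller queues itself on that op's
 * waiting_requests and re-scans once woken.
 */
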
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                                         uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
                                  &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

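/*
 * Worked example (illustrative, not part of the original file): assume a
 * 64 KiB granularity and a 512 KiB target cluster (with subclusters equal
 * to clusters).  A request for [*offset = 192 KiB, *bytes = 64 KiB) whose
 * cluster still needs COW is widened by bdrv_round_to_subclusters() to
 * [0, 512 KiB).  The return value is then
 * 0 + 512K - (192K + 64K) = 256K: the extra bytes past the original tail,
 * which mirror_co_read() adds to *op->bytes_handled.
 */
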
static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                             &op->qiov, 0);
    }
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

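/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the bytes_handled handshake above works because
 * qemu_coroutine_enter() runs the new coroutine synchronously up to its
 * first yield, so the coroutine can report back through a pointer into
 * the caller's stack frame before the caller regains control:
 */
#if 0
static void coroutine_fn worker(void *opaque)
{
    int64_t *reply = opaque;

    *reply = 42;        /* must happen before the first yield */
    /* ...from here on it may yield and complete asynchronously... */
}

static int64_t start_worker(void)
{
    int64_t reply = -1;
    Coroutine *co = qemu_coroutine_create(worker, &reply);

    qemu_coroutine_enter(co);
    assert(reply >= 0); /* already set, as asserted in mirror_perform() */
    return reply;
}
#endif
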
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_block_status_above(source, NULL, offset,
                                             nb_chunks * s->granularity,
                                             &io_bytes, NULL, NULL);
        }
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
                                          &target_offset, &target_bytes);
            }
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
    }

fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);
}

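/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the pseudo-op protocol used above, reduced to its skeleton —
 * publish one placeholder op covering the whole area, launch the real
 * operations, then wake every request that queued on the placeholder:
 */
#if 0
MirrorOp *pseudo_op = g_new(MirrorOp, 1);

*pseudo_op = (MirrorOp){
    .offset       = offset,
    .bytes        = nb_chunks * s->granularity,
    .is_pseudo_op = true,
};
qemu_co_queue_init(&pseudo_op->waiting_requests);
QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

/* ...split the area into real MirrorOps via mirror_perform()... */

QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
g_free(pseudo_op);
#endif
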
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    GLOBAL_STATE_CODE();

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    aio_context_acquire(qemu_get_aio_context());

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;

    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();

    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         *
         * TODO Pull out the writer lock from bdrv_replace_node() to here
         */
        bdrv_graph_rdlock_main_loop();
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_graph_rdunlock_main_loop();
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    aio_context_release(qemu_get_aio_context());

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
                                             bytes, &count);
        }
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

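/*
 * Worked example (illustrative, not part of the original file): the
 * chunking above clamps each request to
 * QEMU_ALIGN_DOWN(INT_MAX, s->granularity) so that `bytes` fits in an
 * int while staying granularity-aligned.  With a 64 KiB granularity this
 * is 2147418112 bytes (2 GiB - 64 KiB), so a 10 GiB device is covered in
 * six requests: five full-sized ones plus a 320 KiB remainder.
 */
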
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        qatomic_set(&s->actively_synced, true);
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    bdrv_graph_co_rdunlock();
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     * accessing it.
     */
    mirror_top_opaque->job = s;

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
            }
            if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
                qatomic_set(&s->actively_synced, true);
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            if (s->in_flight == 0 && cnt == 0) {
                trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                          BLOCK_JOB_SLICE_TIME);
                job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
            }
        } else {
            block_job_ratelimit_sleep(&s->common);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused or cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
                          Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
    MirrorCopyMode current;

    /*
     * The implementation relies on the fact that copy_mode is only written
     * under the BQL. Otherwise, further synchronization would be required.
     */

    GLOBAL_STATE_CODE();

    if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {
        return;
    }

    if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
        error_setg(errp, "Change to copy mode '%s' is not implemented",
                   MirrorCopyMode_str(change_opts->copy_mode));
        return;
    }

    current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
                              change_opts->copy_mode);
    if (current != MIRROR_COPY_MODE_BACKGROUND) {
        error_setg(errp, "Expected current copy mode '%s', got '%s'",
                   MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
                   MirrorCopyMode_str(current));
    }
}

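/*
 * Note (illustrative, not part of the original file): copy_mode is read
 * with qatomic_read() outside the BQL (e.g. in should_copy_to_target()),
 * but written only here, under the BQL.  The qatomic_cmpxchg() above thus
 * mainly documents and enforces the single legal transition,
 * MIRROR_COPY_MODE_BACKGROUND -> MIRROR_COPY_MODE_WRITE_BLOCKING, and
 * fails cleanly with an error if the current mode is anything else.
 */
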
static void mirror_query(BlockJob *job, BlockJobInfo *info)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    info->u.mirror = (BlockJobInfoMirror) {
        .actively_synced = qatomic_read(&s->actively_synced),
    };
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
    .change                 = mirror_change,
    .query                  = mirror_query,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset the corresponding bit in the
         *    dirty_bitmap, as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it does not make mirror
         *    progress diverge.
         *
         * Note that because of this, a guest write may not contribute to
         * mirror convergence; that's fine, as we have a background
         * mirroring process anyway. If, under some bad circumstances
         * (high guest I/O load), the background process starves, we will
         * not converge either way, even if every write contributed, as
         * the guest is not guaranteed to rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    job->active_write_bytes_in_flight -= bytes;
    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        qatomic_set(&job->actively_synced, false);

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

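/*
 * Worked example (illustrative, not part of the original file):
 * granularity 64 KiB, guest write [offset = 48 KiB, bytes = 96 KiB),
 * i.e. bytes [48K, 144K).  If byte 48K is already dirty, the unaligned
 * head [48K, 64K) is skipped (qiov_offset = 16K), leaving [64K, 144K).
 * If byte 144K - 1 is dirty too, the unaligned tail [128K, 144K) is
 * dropped as well, leaving [64K, 128K).  Only that remaining range is
 * written to the target, and exactly chunk 1 ([64K, 128K)) is reset in
 * the dirty bitmap.
 */
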
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area. If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to the
     * target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will not
     * conflict with any requests beyond that. For active writes, however, we
     * cannot truncate that area. The request from our parent must be blocked
     * until the area is copied in full. Therefore, we must wait for the whole
     * area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter &&
        qatomic_read(&op->s->actively_synced)) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

1518
7b32ad22
FE
1519static bool should_copy_to_target(MirrorBDSOpaque *s)
1520{
1521 return s->job && s->job->ret >= 0 &&
1522 !job_is_cancelled(&s->job->common.job) &&
2d400d15 1523 qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
7b32ad22
FE
1524}
1525
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
                         bool copy_to_target, uint64_t offset, uint64_t bytes,
                         QEMUIOVector *qiov, int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (!copy_to_target && s->job && s->job->dirty_bitmap) {
        qatomic_set(&s->job->actively_synced, false);
        bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target = should_copy_to_target(bs->opaque);

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
                                   offset, bytes, qiov, flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

1610
static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                              int64_t bytes, BdrvRequestFlags flags)
{
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
                                    offset, bytes, NULL, flags);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
                                    offset, bytes, NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

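/*
 * Rough picture of the resulting graph while the job runs (assuming no
 * other filters are involved):
 *
 *     guest / other parents
 *              |
 *         mirror_top   (this driver)
 *              |  backing
 *          source BDS  ---(job copies data)--->  target BlockBackend
 */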
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};

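/*
 * Common setup for mirror and active commit, in rough order: insert the
 * mirror_top filter above the source, create the block job on the filter,
 * attach the target through a BlockBackend with suitable permissions,
 * create the (initially disabled) dirty bitmap, register all affected
 * nodes with the job, and finally start it.
 */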
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

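    /*
     * The next step inserts the mirror_top filter between the source and
     * all of its parents: reads keep going to the source, while writes are
     * intercepted so they can be tracked in the dirty bitmap and, in
     * write-blocking mode, replicated to the target synchronously.
     */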
    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add a dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be an NBD server of the target QEMU in the
         * case of non-shared block migration. To allow migration completion,
         * we have to allow "inactivate" of the target BB. When that happens,
         * we know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. The block layer already has assertions
         * to ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    qatomic_set(&s->copy_mode, copy_mode);
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
                                               NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /*
     * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
     * mode.
     */
    bdrv_disable_dirty_bitmap(s->dirty_bitmap);
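    /*
     * Keeping the bitmap disabled means writes do not dirty it
     * automatically; bdrv_mirror_top_do_write() marks blocks dirty
     * explicitly instead, and can skip doing so for requests that were
     * already replicated synchronously to the target.
     */
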
    bdrv_graph_wrlock(bs);
    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        bdrv_graph_wrunlock();
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                bdrv_graph_wrunlock();
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            bdrv_graph_wrunlock();
            goto fail;
        }
    }
    bdrv_graph_wrunlock();
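
    /*
     * The registrations above run under the graph writer lock because
     * block_job_add_bdrv() attaches new BdrvChild edges for the job,
     * which is a graph modification.
     */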

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the graph
         * changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

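/*
 * Illustrative only: a typical full-sync invocation, e.g. on behalf of QMP
 * drive-mirror, could look roughly like this (argument values are made up):
 *
 *     mirror_start("job0", bs, target, NULL, JOB_DEFAULT, 0, 0, 0,
 *                  MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN, false,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  true, NULL, MIRROR_COPY_MODE_BACKGROUND, errp);
 *
 * commit_active_start() below reuses the same machinery, with the base
 * image as the mirroring target.
 */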
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}