/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

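/*
 * In outline: the mirror block job copies the contents of a source node to
 * a target node while the guest may keep writing.  A "mirror_top" filter
 * driver is inserted above the source so that guest writes are either
 * recorded in a dirty bitmap and copied in the background (see
 * mirror_iteration()) or mirrored to the target synchronously
 * (MIRROR_COPY_MODE_WRITE_BLOCKING, see bdrv_mirror_top_do_write()).
 * Data moves in granularity-sized chunks, with at most MAX_IN_FLIGHT
 * concurrent background operations.
 */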
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

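/* Translate an I/O error into the action configured for the job:
 * on_source_error for failed reads, on_target_error for failed writes.
 * Any error means the target can no longer be assumed to be in sync. */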
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

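/* Wait until no in-flight operation overlaps the chunk range
 * [offset, offset + bytes).  @self may be NULL (used by mirror_iteration()
 * before it has an operation of its own); if it is not, the waiting_for_op
 * link is maintained so that two operations waiting on each other do not
 * deadlock. */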
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

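/* Common completion path for background operations: return the buffer
 * chunks to s->buf_free, clear the in-flight bitmap, account progress on
 * success, and wake up any operation waiting on this one. */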
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                                         uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
                                  &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                             &op->qiov, 0);
    }
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

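/*
 * Dispatch one background operation (copy, write-zeroes or discard) for
 * [offset, offset + bytes) by spawning the matching coroutine, e.g.:
 *
 *     io_bytes = mirror_perform(s, offset, io_bytes, MIRROR_METHOD_COPY);
 *
 * Returns the number of bytes handled, which the coroutine stores through
 * op->bytes_handled before it first yields; for copies this may exceed
 * @bytes when mirror_cow_align() widens the request to target cluster
 * boundaries.
 */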
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

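/* Perform one pass of the background copy loop: pick the next dirty
 * offset from the bitmap iterator, extend it over consecutive dirty
 * chunks up to s->buf_size, clear the corresponding dirty bits, and issue
 * copy, write-zeroes or discard operations depending on the block status
 * of the area.  A pseudo operation covers the whole area so that
 * conflicting requests can wait on it until all real operations have been
 * launched. */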
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_block_status_above(source, NULL, offset,
                                             nb_chunks * s->granularity,
                                             &io_bytes, NULL, NULL);
        }
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
                                          &target_offset, &target_bytes);
            }
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
    }

fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);
}

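/* Slice the job buffer s->buf into granularity-sized chunks and put them
 * all on the s->buf_free list. */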
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    GLOBAL_STATE_CODE();

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    aio_context_acquire(qemu_get_aio_context());

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;

    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();

    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         *
         * TODO Pull out the writer lock from bdrv_replace_node() to here
         */
        bdrv_graph_rdlock_main_loop();
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_graph_rdunlock_main_loop();
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    aio_context_release(qemu_get_aio_context());

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

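/* Prepare the dirty bitmap for the initial bulk copy.  If the target must
 * be zero-initialized explicitly, either mark the whole device dirty
 * (when the target cannot write zeroes with unmap) or issue write-zeroes
 * operations over the entire device up front.  Then mark dirty every
 * region that is allocated above s->base_overlay. */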
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
                                             bytes, &count);
        }
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

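/* The job's main coroutine.  It checks that source and target sizes match
 * (resizing the base for active commit), performs the initial bulk copy,
 * and then loops: copy dirty areas, transition to READY after the first
 * successful flush with a clean bitmap, and complete once the job should
 * finish and source and target are in sync. */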
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    bdrv_graph_co_rdunlock();
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     * accessing it.
     */
    mirror_top_opaque->job = s;

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
            }
            if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                s->actively_synced = true;
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            if (s->in_flight == 0 && cnt == 0) {
                trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                          BLOCK_JOB_SLICE_TIME);
                job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
            }
        } else {
            block_job_ratelimit_sleep(&s->common);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

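/* .complete callback: validate and block the optional replacement node
 * (s->replaces), then ask the job to finish as soon as source and target
 * are in sync. */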
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

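/* Mirror a guest write to the target synchronously (active mirror mode).
 * Unaligned head and tail parts that are still dirty are skipped: their
 * bits could not be reset in the dirty bitmap anyway, and the background
 * copy will pick them up.  The remaining aligned range is cleared in the
 * bitmap before writing and marked dirty again if the write fails. */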
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note, that because of this, guest write may have no contribution
         * into mirror converge, but that's not bad, as we have background
         * process of mirroring. If under some bad circumstances (high guest
         * IO load) background process starve, we will not converge anyway,
         * even if each write will contribute, as guest is not guaranteed to
         * rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    job->active_write_bytes_in_flight -= bytes;
    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

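/* Register an active write as an in-flight operation and wait until the
 * whole affected area is free of concurrent background requests; the
 * counterpart active_write_settle() releases the area and wakes up
 * waiters. */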
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area. If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to the
     * target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will not
     * conflict with any requests beyond that. For active writes, however, we
     * cannot truncate that area. The request from our parent must be blocked
     * until the area is copied in full. Therefore, we must wait for the whole
     * area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

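/* Common write path of the mirror_top filter: forward the request to the
 * source (backing) node and, in write-blocking mode, also copy it to the
 * target synchronously; otherwise just mark the written range dirty. */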
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
                         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
                         int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (!copy_to_target && s->job && s->job->dirty_bitmap) {
        s->job->actively_synced = false;
        bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

b9b10c35
KW
1523static int coroutine_fn GRAPH_RDLOCK
1524bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1525 QEMUIOVector *qiov, BdrvRequestFlags flags)
4ef85a9c 1526{
d06107ad
HR
1527 MirrorBDSOpaque *s = bs->opaque;
1528 QEMUIOVector bounce_qiov;
1529 void *bounce_buf;
1530 int ret = 0;
da93d5c8 1531 bool copy_to_target = false;
d06107ad 1532
da93d5c8
HR
1533 if (s->job) {
1534 copy_to_target = s->job->ret >= 0 &&
1535 !job_is_cancelled(&s->job->common.job) &&
1536 s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
1537 }
d06107ad
HR
1538
1539 if (copy_to_target) {
1540 /* The guest might concurrently modify the data to write; but
1541 * the data on source and destination must match, so we have
1542 * to use a bounce buffer if we are going to write to the
1543 * target now. */
1544 bounce_buf = qemu_blockalign(bs, bytes);
1545 iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);
1546
1547 qemu_iovec_init(&bounce_qiov, 1);
1548 qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
1549 qiov = &bounce_qiov;
e8b65355
SH
1550
1551 flags &= ~BDRV_REQ_REGISTERED_BUF;
d06107ad
HR
1552 }
1553
1554 ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
1555 flags);
1556
1557 if (copy_to_target) {
1558 qemu_iovec_destroy(&bounce_qiov);
1559 qemu_vfree(bounce_buf);
1560 }
1561
1562 return ret;
4ef85a9c
KW
1563}
1564
88095349 1565static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
4ef85a9c 1566{
ce960aa9
VSO
1567 if (bs->backing == NULL) {
1568 /* we can be here after failed bdrv_append in mirror_start_job */
1569 return 0;
1570 }
4ef85a9c
KW
1571 return bdrv_co_flush(bs->backing->bs);
1572}
1573
abaf8b75
KW
1574static int coroutine_fn GRAPH_RDLOCK
1575bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
1576 int64_t bytes, BdrvRequestFlags flags)
4ef85a9c 1577{
d06107ad
HR
1578 return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
1579 flags);
4ef85a9c
KW
1580}
1581
9a5a1c62
EGE
1582static int coroutine_fn GRAPH_RDLOCK
1583bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
4ef85a9c 1584{
d06107ad
HR
1585 return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
1586 NULL, 0);
4ef85a9c
KW
1587}
1588
998b3a1e 1589static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
fd4a6493 1590{
18775ff3
VSO
1591 if (bs->backing == NULL) {
1592 /* we can be here after failed bdrv_attach_child in
1593 * bdrv_set_backing_hd */
1594 return;
1595 }
fd4a6493
KW
1596 pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1597 bs->backing->bs->filename);
1598}
1599
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

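/*
 * Concretely: for a plain mirror the backing child keeps the default
 * filtered-child permissions, while for active commit the child never
 * requires CONSISTENT_READ and always shares WRITE, because the chain
 * below the filter remains in active use by its other parents.
 */
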
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};

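/*
 * For contrast with the table above: the bare minimum a comparable
 * pass-through filter needs is a read handler that forwards to the
 * filtered child (bdrv_mirror_top_preadv forwards in essentially this
 * way).  An illustrative sketch only; this hypothetical driver is not
 * part of QEMU:
 */
static int coroutine_fn GRAPH_RDLOCK
example_passthrough_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                           QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    /* Simply forward the request to the backing (filtered) child */
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static BlockDriver example_bdrv_passthrough = {
    .format_name                = "example_passthrough",
    .bdrv_co_preadv             = example_passthrough_preadv,
    .bdrv_child_perm            = bdrv_default_perms,
    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};
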
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    GLOBAL_STATE_CODE();

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
                                               NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /*
     * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
     * mode.
     */
    bdrv_disable_dirty_bitmap(s->dirty_bitmap);

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_graph_rdunlock_main_loop();
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

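/*
 * Illustrative only: mirror_start() is normally reached via the QMP
 * drive-mirror/blockdev-mirror handlers.  A minimal direct invocation
 * could look as follows (hypothetical helper; src and dst are assumed to
 * be already-opened, attached nodes):
 */
static void example_start_full_mirror(BlockDriverState *src,
                                      BlockDriverState *dst)
{
    Error *local_err = NULL;

    mirror_start("mirror0", src, dst, NULL /* replaces */, JOB_DEFAULT,
                 0 /* speed: unlimited */, 0 /* granularity: default */,
                 0 /* buf_size: default */, MIRROR_SYNC_MODE_FULL,
                 MIRROR_OPEN_BACKING_CHAIN, false /* zero_target */,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 true /* unmap */, NULL /* filter_node_name */,
                 MIRROR_COPY_MODE_BACKGROUND, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
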
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}
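
/*
 * Likewise illustrative: commit_active_start() backs QMP block-commit
 * when committing the active layer.  A hypothetical direct call, relying
 * on the read-only save/restore handling shown above:
 */
static BlockJob *example_start_active_commit(BlockDriverState *top,
                                             BlockDriverState *base,
                                             Error **errp)
{
    return commit_active_start("commit0", top, base, JOB_DEFAULT,
                               0 /* speed */, BLOCKDEV_ON_ERROR_REPORT,
                               NULL /* filter_node_name */,
                               NULL /* cb */, NULL /* opaque */,
                               false /* auto_complete */, errp);
}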