/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

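/*
 * Apply the configured error policy for a source (read) or target (write)
 * error; any error also means the target is no longer actively synced.
 */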
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

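/*
 * Wait for all in-flight operations that overlap the chunk range
 * [offset, offset + bytes) to complete. @self may be NULL when waiting
 * on behalf of an operation that has not been created yet.
 */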
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

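/*
 * Tear down a finished operation: return its buffer chunks to the free
 * list, clear its bits in the in-flight bitmap, update the progress
 * counters, and wake up everybody waiting on it.
 */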
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

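/*
 * Completion callbacks for both directions of a copy: on failure, the
 * affected range is re-marked dirty so it will be retried, and the error
 * policy decides whether the job stops. A successful read chains directly
 * into the corresponding target write.
 */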
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

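/* Yield until some background operation finishes, freeing up resources. */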
static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on.
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

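/*
 * Launch a single copy, zero or discard operation as its own coroutine.
 * Returns the number of bytes the new operation handles, which may differ
 * from @bytes for copies due to buffer limits and cluster alignment
 * (see mirror_co_read()).
 */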
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

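/*
 * Perform one iteration of the background copy loop: find the next dirty
 * area, trim it against in-flight operations, and launch copy, zero or
 * discard operations for its chunks. Returns the ratelimit-imposed delay
 * in nanoseconds that the caller should sleep.
 */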
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

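/*
 * Carve s->buf into granularity-sized chunks and put them all on the
 * buf_free list; e.g. a 16 MiB buffer at 64 KiB granularity yields
 * 256 free chunks.
 */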
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

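/*
 * Yield at least once per BLOCK_JOB_SLICE_TIME so the main loop can run,
 * and honor pause requests in between.
 */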
static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

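/*
 * Prepare a full-sync mirror: optionally zero out the whole target, then
 * mark as dirty every part of the source that is allocated above the
 * base, so that the main loop copies exactly those areas.
 */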
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

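/*
 * The job's main coroutine: check source and target sizes, set up the
 * copy buffers and populate the dirty bitmap, then loop over
 * mirror_iteration() until source and target converge and the job
 * completes or is cancelled.
 */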
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        }
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

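/*
 * .complete callback: resolve and block the @replaces node if one was
 * given, then ask the job coroutine to finish.
 */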
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
            && !s->in_drain) {
            return true;
        }
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll           = mirror_drained_poll,
};

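/*
 * Replicate one guest write synchronously to the target (active mirror,
 * write-blocking mode). Unaligned head and tail parts that are still
 * dirty are skipped here and left to the background copy process.
 */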
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note, that because of this, guest write may have no contribution
         * into mirror converge, but that's not bad, as we have background
         * process of mirroring. If under some bad circumstances (high guest
         * IO load) background process starve, we will not converge anyway,
         * even if each write will contribute, as guest is not guaranteed to
         * rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    job->active_write_bytes_in_flight -= bytes;
    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

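/*
 * Register an active write in ops_in_flight and claim its chunks in the
 * in-flight bitmap, first waiting for every conflicting request in the
 * whole affected area.
 */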
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area. If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to
     * the target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will not
     * conflict with any requests beyond that. For active writes, however, we
     * cannot truncate that area. The request from our parent must be blocked
     * until the area is copied in full. Therefore, we must wait for the whole
     * area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

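/*
 * Forward a write, zero or discard request to the source node and, in
 * write-blocking mode, mirror it synchronously to the target through
 * do_sync_target_write().
 */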
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target = false;

    if (s->job) {
        copy_to_target = s->job->ret >= 0 &&
                         !job_is_cancelled(&s->job->common.job) &&
                         s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
    }

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};

static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

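    /* Appending the filter is a graph change, so it must happen inside a
     * drained section while no requests are in flight on bs. */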
    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

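        /* The base image may have to grow to fit the contents of the top
         * image when they are committed down, so allow resizing it in
         * that case. */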
        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
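    /* The job may move to a different AioContext, and the target must not
     * queue requests while drained, or completing a drained section could
     * deadlock waiting on those queued requests. */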
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
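    /* In write-blocking mode, guest writes are copied to the target
     * synchronously, so they must not re-dirty the bitmap; disable it so
     * that only regions dirty at job start are copied in the background. */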
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

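    /* The job needs no permissions on the source node itself (all access
     * goes through mirror_top_bs), but by not sharing RESIZE it keeps the
     * source from being resized while the job runs. */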
    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other options would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

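        /* Freeze the chain between the filter and the target so that none
         * of these backing links can be detached or replaced while the
         * commit job is running. */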
        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
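    /* Roll back: drop the job's references first, then take mirror_top_bs
     * back out of the graph so that bs looks as it did before. */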
    if (s) {
        /* Make sure this BDS does not go away until we have completed the graph
         * changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

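    /* Bitmap-based sync modes are implemented by the backup job only, so
     * reject them here. */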
    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

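    /* The commit job writes the top image's data into the base, so the base
     * must be reopened writable; the original read-only state is restored
     * if starting the job fails. */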
    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}