/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
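
/* The free list is intrusive: mirror_free_init() below carves s->buf into
 * granularity-sized chunks and threads the list through the first bytes of
 * each free chunk, so no separate list nodes are allocated:
 *
 *   s->buf:    [chunk 0][chunk 1][chunk 2] ...
 *   buf_free:   chunk 0 -> chunk 1 -> chunk 2 -> ...
 */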

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;
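
/* A note on the three bitmaps above:
 * - dirty_bitmap tracks which parts of the source still differ from the
 *   target; the block layer sets bits as the guest writes to the source.
 * - in_flight_bitmap marks chunks with a pending copy/zero/discard
 *   operation, so concurrent iterations stay out of each other's way.
 * - cow_bitmap, allocated only when the target cannot do COW by itself,
 *   records chunks that have already been written to the target, so later
 *   reads need not be widened to the target cluster size again.
 */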

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}
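
/* Worked example: for a 1 MiB device, bdev_length / BDRV_SECTOR_SIZE is
 * 2048.  A request for 256 sectors starting at sector 1920 would run past
 * the end of the device, so it is clipped to 2048 - 1920 = 128 sectors. */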

/* Round sector_num and/or nb_sectors to the target cluster if COW is needed,
 * and return the offset of the adjusted tail sector against the original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may leave align_nb_sectors unaligned to the chunk boundary,
     * but that doesn't matter because it is already the end of the source
     * image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}
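
/* Worked example with hypothetical values: a 64 KiB granularity gives
 * chunk_sectors = 65536 >> 9 = 128; say the target cluster is 128 KiB, i.e.
 * 256 sectors, and max_iov is large enough not to clamp.  For *sector_num =
 * 128 and *nb_sectors = 128 with the cow_bitmap bit clear, the range is
 * rounded out to the cluster: align_sector_num = 0, align_nb_sectors = 256.
 * The function returns 0 + 256 - (128 + 128) = 0, because only the head was
 * extended; the tail did not move. */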

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
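
/* The yield above parks the job coroutine; mirror_iteration_done() re-enters
 * it through qemu_coroutine_enter() when an in-flight operation completes.
 * s->waiting_for_io is the handshake flag between the coroutine and the AIO
 * completion callbacks. */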

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if the tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must be aligned to the granularity because:
     * 1) the caller passes in aligned values;
     * 2) mirror_cow_align is used only when the target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}
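
/* Example of the return convention, continuing the mirror_cow_align()
 * example above: a request for 128 sectors at sector 128 that is widened to
 * 256 sectors at sector 0 still returns 128, because the sectors copied in
 * front of sector_num must not be counted as progress; mirror_iteration()
 * advances by exactly the returned value. */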

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
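
/* For illustration: with the 10 MiB DEFAULT_MIRROR_BUF_SIZE and a 64 KiB
 * granularity, the loop above produces 10485760 / 65536 = 160 free chunks
 * (buf_free_count == 160). */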

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we have called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for completion. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            } else {
                block_job_pause_point(&s->common);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
            (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while the mirror
             * job loop runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_begin isn't enough to quiesce it.  This is ugly,
         * we need a block layer API change to achieve this. */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
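
/* The rate limit is accounted in sectors: for example, a speed of 10 MiB/s
 * is passed to ratelimit_set_speed() as 10485760 / 512 = 20480 sectors per
 * second, metered out in SLICE_TIME (100 ms) windows. */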

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *src, *target;

    src = blk_bs(job->blk);
    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target) != backing) {
            bdrv_set_backing_hd(target, backing);
        }
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

/* There is no matching mirror_resume() because mirror_run() will begin
 * iterating again when the job is resumed.
 */
static void coroutine_fn mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_drain(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);
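
    /* (granularity & (granularity - 1)) == 0 is the usual power-of-two test:
     * e.g. 65536 & 65535 == 0, whereas any value with more than one bit set
     * leaves a nonzero result.  A power-of-two granularity keeps the chunk
     * arithmetic (shifts and modulo) throughout this file exact. */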

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0, MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}