/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

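/* Map a failed source read or target write to the action configured with
 * on_source_error/on_target_error, and mark the job as no longer in sync. */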
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

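/* Common completion path for all mirror operations: return the buffer chunks
 * to the free list, clear the in-flight bitmap, account for the progress,
 * and wake the job coroutine if it is waiting for an operation to finish. */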
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

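/* AIO callback for writes to the target: on failure, re-dirty the range so
 * that it is copied again later. */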
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

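/* AIO callback for reads from the source: on success, chain into the write
 * to the target; on failure, re-dirty the range and finish the operation. */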
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}

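/* Clip *nb_sectors so that the range does not extend past the end of the
 * device. */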
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector relative to the original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of the source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

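/* Yield until an in-flight operation completes; mirror_iteration_done()
 * re-enters the coroutine when s->waiting_for_io is set. */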
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

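/* Submit an asynchronous write-zeroes or discard to the target; used for
 * ranges whose block status shows that no data needs to be read from the
 * source. */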
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so that the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

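/* Perform one iteration of the copy loop: take the first dirty chunk from
 * the bitmap iterator, extend it over consecutive dirty chunks up to the
 * buffer size, and copy, zero or discard each sub-range according to the
 * source's block status.  Returns the delay requested by the rate limiter,
 * in nanoseconds. */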
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors);
        }
    }
    return delay_ns;
}

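/* Carve s->buf into granularity-sized chunks and put them all on the
 * buf_free list. */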
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

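/* Wait for all in-flight operations to complete. */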
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

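/* Called in the main loop via block_job_defer_to_main_loop().  On successful
 * completion this swaps the target into the place of the source (or of the
 * node given by @replaces) before dropping the remaining references. */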
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we have called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}

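/* Yield at least once per SLICE_TIME, and check for a pause request in
 * between, so the job stays responsive during the initial bitmap scan. */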
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

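/* The body of the job coroutine.  For sync modes other than "none", first
 * populate the dirty bitmap from the allocation status of the source chain;
 * then loop over mirror_iteration() until the source is clean, report
 * BLOCK_JOB_READY, and keep copying until completion or cancellation. */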
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_begin isn't enough to quiesce it.  This is ugly,
         * we need a block layer API change to achieve this. */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

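/* Implements block-job-complete: make sure the target has the requested
 * backing file, block operations on the node to be replaced, and signal the
 * job coroutine to finish. */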
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *src, *target;

    src = blk_bs(job->blk);
    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target) != backing) {
            bdrv_set_backing_hd(target, backing);
        }
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

/* There is no matching mirror_resume() because mirror_run() will begin
 * iterating again when the job is resumed.
 */
static void coroutine_fn mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_drain(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};

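/* Common setup for mirror and active-commit jobs: validate granularity and
 * buffer size, create the job and the target BlockBackend, allocate the
 * dirty bitmap, and enter the mirror_run() coroutine. */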
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             BlockDriverState *target, const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(job_id, driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run, s);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co);
}

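/* Create a mirror job.  Sync mode "incremental" is rejected here; "none" and
 * "top" are translated into the is_none_mode/base arguments of
 * mirror_start_job(). */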
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

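/* Active commit is a mirror job whose target is the base image.  The base is
 * reopened with the top image's flags and, if it is smaller than the top,
 * grown before the job starts; the original flags are restored on failure. */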
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(job_id, bs, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}