/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

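/* With these defaults the mirror bounce buffer is 16 MiB: up to 16
 * concurrent operations of at most 1 MiB each. */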
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            block_job_progress_update(&s->common, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
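/* For example, with a 64 KiB granularity and a 128 KiB target cluster, a
 * request for [192 KiB, 224 KiB) over uncopied chunks is widened to the
 * cluster range [128 KiB, 256 KiB); the function then returns 32 KiB, the
 * distance the tail moved from 224 KiB to 256 KiB (assuming neither the
 * max_iov limit nor end-of-file clips the range). */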
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

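/* Yield until one of the in-flight operations completes;
 * mirror_iteration_done() re-enters the coroutine when it sees
 * waiting_for_io set. */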
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
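/* Continuing the mirror_cow_align() example above: a 32 KiB request at
 * 192 KiB that is widened to [128 KiB, 256 KiB) makes this function return
 * 64 KiB, i.e. everything copied from 192 KiB up to the new end at 256 KiB. */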
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}

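/* Submit an asynchronous write-zeroes or discard to the target; completion
 * is accounted through mirror_write_complete() just like a regular copy. */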
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

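/* One pass of the copy loop: take the next dirty chunk from the bitmap
 * iterator, extend it with consecutive dirty chunks up to buf_size, and
 * mirror each extent by copying, writing zeroes or discarding depending on
 * its block status. Returns the delay (in ns) requested by the rate
 * limiter, if any. */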
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->common.limit,
                                                 io_bytes_acct);
        }
    }
    return delay_ns;
}

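/* Carve the bounce buffer into granularity-sized chunks and put them all
 * on the free list. */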
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

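/* Runs in the main loop (via block_job_defer_to_main_loop) to switch the
 * target into place if requested, drop the mirror_top filter from the
 * graph and complete the job. */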
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

595 | { | |
596 | int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | |
597 | ||
18bb6928 | 598 | if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { |
49efb1f5 | 599 | s->last_pause_ns = now; |
5bf1d5a7 | 600 | block_job_sleep_ns(&s->common, 0); |
49efb1f5 DL |
601 | } else { |
602 | block_job_pause_point(&s->common); | |
603 | } | |
604 | } | |
605 | ||
c0b363ad DL |
606 | static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) |
607 | { | |
23ca459a | 608 | int64_t offset; |
c0b363ad | 609 | BlockDriverState *base = s->base; |
4ef85a9c | 610 | BlockDriverState *bs = s->source; |
c0b363ad | 611 | BlockDriverState *target_bs = blk_bs(s->target); |
23ca459a | 612 | int ret; |
51b0a488 | 613 | int64_t count; |
c0b363ad | 614 | |
b7d5062c | 615 | if (base == NULL && !bdrv_has_zero_init(target_bs)) { |
c7c2769c | 616 | if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { |
e0d7f73e | 617 | bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); |
c7c2769c DL |
618 | return 0; |
619 | } | |
620 | ||
90ab48eb | 621 | s->initial_zeroing_ongoing = true; |
23ca459a EB |
622 | for (offset = 0; offset < s->bdev_length; ) { |
623 | int bytes = MIN(s->bdev_length - offset, | |
624 | QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); | |
c7c2769c DL |
625 | |
626 | mirror_throttle(s); | |
627 | ||
628 | if (block_job_is_cancelled(&s->common)) { | |
90ab48eb | 629 | s->initial_zeroing_ongoing = false; |
c7c2769c DL |
630 | return 0; |
631 | } | |
632 | ||
633 | if (s->in_flight >= MAX_IN_FLIGHT) { | |
67adf4b3 EB |
634 | trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, |
635 | s->in_flight); | |
c7c2769c DL |
636 | mirror_wait_for_io(s); |
637 | continue; | |
638 | } | |
639 | ||
23ca459a EB |
640 | mirror_do_zero_or_discard(s, offset, bytes, false); |
641 | offset += bytes; | |
c7c2769c DL |
642 | } |
643 | ||
bae8196d | 644 | mirror_wait_for_all_io(s); |
90ab48eb | 645 | s->initial_zeroing_ongoing = false; |
b7d5062c DL |
646 | } |
647 | ||
c0b363ad | 648 | /* First part, loop on the sectors and initialize the dirty bitmap. */ |
23ca459a | 649 | for (offset = 0; offset < s->bdev_length; ) { |
c0b363ad | 650 | /* Just to make sure we are not exceeding int limit. */ |
23ca459a EB |
651 | int bytes = MIN(s->bdev_length - offset, |
652 | QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); | |
c0b363ad DL |
653 | |
654 | mirror_throttle(s); | |
655 | ||
656 | if (block_job_is_cancelled(&s->common)) { | |
657 | return 0; | |
658 | } | |
659 | ||
23ca459a | 660 | ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count); |
c0b363ad DL |
661 | if (ret < 0) { |
662 | return ret; | |
663 | } | |
664 | ||
23ca459a | 665 | assert(count); |
b7d5062c | 666 | if (ret == 1) { |
23ca459a | 667 | bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count); |
c0b363ad | 668 | } |
23ca459a | 669 | offset += count; |
c0b363ad DL |
670 | } |
671 | return 0; | |
672 | } | |
673 | ||
bdffb31d PB |
674 | /* Called when going out of the streaming phase to flush the bulk of the |
675 | * data to the medium, or just before completing. | |
676 | */ | |
677 | static int mirror_flush(MirrorBlockJob *s) | |
678 | { | |
679 | int ret = blk_flush(s->target); | |
680 | if (ret < 0) { | |
681 | if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { | |
682 | s->ret = ret; | |
683 | } | |
684 | } | |
685 | return ret; | |
686 | } | |
687 | ||
893f7eba PB |
688 | static void coroutine_fn mirror_run(void *opaque) |
689 | { | |
690 | MirrorBlockJob *s = opaque; | |
5a7e7a0b | 691 | MirrorExitData *data; |
4ef85a9c | 692 | BlockDriverState *bs = s->source; |
e253f4b8 | 693 | BlockDriverState *target_bs = blk_bs(s->target); |
9a0cec66 | 694 | bool need_drain = true; |
c0b363ad | 695 | int64_t length; |
b812f671 | 696 | BlockDriverInfo bdi; |
1d33936e JC |
697 | char backing_filename[2]; /* we only need 2 characters because we are only |
698 | checking for a NULL string */ | |
893f7eba | 699 | int ret = 0; |
893f7eba PB |
700 | |
701 | if (block_job_is_cancelled(&s->common)) { | |
702 | goto immediate_exit; | |
703 | } | |
704 | ||
b21c7652 HR |
705 | s->bdev_length = bdrv_getlength(bs); |
706 | if (s->bdev_length < 0) { | |
707 | ret = s->bdev_length; | |
373df5b1 | 708 | goto immediate_exit; |
becc347e KW |
709 | } |
710 | ||
711 | /* Active commit must resize the base image if its size differs from the | |
712 | * active layer. */ | |
713 | if (s->base == blk_bs(s->target)) { | |
714 | int64_t base_length; | |
715 | ||
716 | base_length = blk_getlength(s->target); | |
717 | if (base_length < 0) { | |
718 | ret = base_length; | |
719 | goto immediate_exit; | |
720 | } | |
721 | ||
722 | if (s->bdev_length > base_length) { | |
3a691c50 HR |
723 | ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF, |
724 | NULL); | |
becc347e KW |
725 | if (ret < 0) { |
726 | goto immediate_exit; | |
727 | } | |
728 | } | |
729 | } | |
730 | ||
731 | if (s->bdev_length == 0) { | |
9e48b025 FZ |
732 | /* Report BLOCK_JOB_READY and wait for complete. */ |
733 | block_job_event_ready(&s->common); | |
734 | s->synced = true; | |
735 | while (!block_job_is_cancelled(&s->common) && !s->should_complete) { | |
736 | block_job_yield(&s->common); | |
737 | } | |
738 | s->common.cancelled = false; | |
739 | goto immediate_exit; | |
893f7eba PB |
740 | } |
741 | ||
b21c7652 | 742 | length = DIV_ROUND_UP(s->bdev_length, s->granularity); |
402a4741 PB |
743 | s->in_flight_bitmap = bitmap_new(length); |
744 | ||
b812f671 PB |
745 | /* If we have no backing file yet in the destination, we cannot let |
746 | * the destination do COW. Instead, we copy sectors around the | |
747 | * dirty data if needed. We need a bitmap to do that. | |
748 | */ | |
e253f4b8 | 749 | bdrv_get_backing_filename(target_bs, backing_filename, |
b812f671 | 750 | sizeof(backing_filename)); |
e253f4b8 | 751 | if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) { |
b436982f EB |
752 | s->target_cluster_size = bdi.cluster_size; |
753 | } else { | |
754 | s->target_cluster_size = BDRV_SECTOR_SIZE; | |
e5b43573 | 755 | } |
b436982f EB |
756 | if (backing_filename[0] && !target_bs->backing && |
757 | s->granularity < s->target_cluster_size) { | |
758 | s->buf_size = MAX(s->buf_size, s->target_cluster_size); | |
e5b43573 | 759 | s->cow_bitmap = bitmap_new(length); |
b812f671 | 760 | } |
e253f4b8 | 761 | s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); |
b812f671 | 762 | |
7504edf4 KW |
763 | s->buf = qemu_try_blockalign(bs, s->buf_size); |
764 | if (s->buf == NULL) { | |
765 | ret = -ENOMEM; | |
766 | goto immediate_exit; | |
767 | } | |
768 | ||
402a4741 | 769 | mirror_free_init(s); |
893f7eba | 770 | |
49efb1f5 | 771 | s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
03544a6e | 772 | if (!s->is_none_mode) { |
c0b363ad DL |
773 | ret = mirror_dirty_init(s); |
774 | if (ret < 0 || block_job_is_cancelled(&s->common)) { | |
775 | goto immediate_exit; | |
893f7eba PB |
776 | } |
777 | } | |
778 | ||
dc162c8e | 779 | assert(!s->dbi); |
715a74d8 | 780 | s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); |
893f7eba | 781 | for (;;) { |
cc8c9d6c | 782 | uint64_t delay_ns = 0; |
49efb1f5 | 783 | int64_t cnt, delta; |
893f7eba PB |
784 | bool should_complete; |
785 | ||
bd48bde8 PB |
786 | if (s->ret < 0) { |
787 | ret = s->ret; | |
788 | goto immediate_exit; | |
789 | } | |
790 | ||
565ac01f SH |
791 | block_job_pause_point(&s->common); |
792 | ||
20dca810 | 793 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); |
05df8a6a KW |
794 | /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is |
795 | * the number of bytes currently being processed; together those are | |
796 | * the current remaining operation length */ | |
797 | block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt); | |
bd48bde8 PB |
798 | |
799 | /* Note that even when no rate limit is applied we need to yield | |
a7282330 | 800 | * periodically with no pending I/O so that bdrv_drain_all() returns. |
18bb6928 KW |
801 | * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is |
802 | * an error, or when the source is clean, whichever comes first. */ | |
49efb1f5 | 803 | delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; |
18bb6928 | 804 | if (delta < BLOCK_JOB_SLICE_TIME && |
bd48bde8 | 805 | s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
cf56a3c6 | 806 | if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || |
402a4741 | 807 | (cnt == 0 && s->in_flight > 0)) { |
9a46dba7 | 808 | trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); |
21cd917f | 809 | mirror_wait_for_io(s); |
bd48bde8 PB |
810 | continue; |
811 | } else if (cnt != 0) { | |
cc8c9d6c | 812 | delay_ns = mirror_iteration(s); |
893f7eba | 813 | } |
893f7eba PB |
814 | } |
815 | ||
816 | should_complete = false; | |
bd48bde8 | 817 | if (s->in_flight == 0 && cnt == 0) { |
893f7eba | 818 | trace_mirror_before_flush(s); |
bdffb31d PB |
819 | if (!s->synced) { |
820 | if (mirror_flush(s) < 0) { | |
821 | /* Go check s->ret. */ | |
822 | continue; | |
b952b558 | 823 | } |
b952b558 PB |
824 | /* We're out of the streaming phase. From now on, if the job |
825 | * is cancelled we will actually complete all pending I/O and | |
826 | * report completion. This way, block-job-cancel will leave | |
827 | * the target in a consistent state. | |
828 | */ | |
bdffb31d PB |
829 | block_job_event_ready(&s->common); |
830 | s->synced = true; | |
d63ffd87 | 831 | } |
bdffb31d PB |
832 | |
833 | should_complete = s->should_complete || | |
834 | block_job_is_cancelled(&s->common); | |
835 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); | |
893f7eba PB |
836 | } |
837 | ||
838 | if (cnt == 0 && should_complete) { | |
839 | /* The dirty bitmap is not updated while operations are pending. | |
840 | * If we're about to exit, wait for pending operations before | |
841 | * calling bdrv_get_dirty_count(bs), or we may exit while the | |
842 | * source has dirty data to copy! | |
843 | * | |
844 | * Note that I/O can be submitted by the guest while | |
9a0cec66 PB |
845 | * mirror_populate runs, so pause it now. Before deciding |
846 | * whether to switch to target check one last time if I/O has | |
847 | * come in the meanwhile, and if not flush the data to disk. | |
893f7eba | 848 | */ |
9a46dba7 | 849 | trace_mirror_before_drain(s, cnt); |
9a0cec66 PB |
850 | |
851 | bdrv_drained_begin(bs); | |
20dca810 | 852 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); |
bdffb31d | 853 | if (cnt > 0 || mirror_flush(s) < 0) { |
9a0cec66 PB |
854 | bdrv_drained_end(bs); |
855 | continue; | |
856 | } | |
857 | ||
858 | /* The two disks are in sync. Exit and report successful | |
859 | * completion. | |
860 | */ | |
861 | assert(QLIST_EMPTY(&bs->tracked_requests)); | |
862 | s->common.cancelled = false; | |
863 | need_drain = false; | |
864 | break; | |
893f7eba PB |
865 | } |
866 | ||
867 | ret = 0; | |
ddc4115e SH |
868 | |
869 | if (s->synced && !should_complete) { | |
18bb6928 KW |
870 | delay_ns = (s->in_flight == 0 && |
871 | cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); | |
ddc4115e | 872 | } |
9a46dba7 | 873 | trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); |
ddc4115e | 874 | block_job_sleep_ns(&s->common, delay_ns); |
eb36639f HR |
875 | if (block_job_is_cancelled(&s->common) && |
876 | (!s->synced || s->common.force)) | |
877 | { | |
b76e4458 | 878 | break; |
893f7eba | 879 | } |
49efb1f5 | 880 | s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
893f7eba PB |
881 | } |
882 | ||
883 | immediate_exit: | |
bd48bde8 PB |
884 | if (s->in_flight > 0) { |
885 | /* We get here only if something went wrong. Either the job failed, | |
886 | * or it was cancelled prematurely so that we do not guarantee that | |
887 | * the target is a copy of the source. | |
888 | */ | |
b76e4458 LL |
889 | assert(ret < 0 || ((s->common.force || !s->synced) && |
890 | block_job_is_cancelled(&s->common))); | |
9a0cec66 | 891 | assert(need_drain); |
bae8196d | 892 | mirror_wait_for_all_io(s); |
bd48bde8 PB |
893 | } |
894 | ||
895 | assert(s->in_flight == 0); | |
7191bf31 | 896 | qemu_vfree(s->buf); |
b812f671 | 897 | g_free(s->cow_bitmap); |
402a4741 | 898 | g_free(s->in_flight_bitmap); |
dc162c8e | 899 | bdrv_dirty_iter_free(s->dbi); |
5a7e7a0b SH |
900 | |
901 | data = g_malloc(sizeof(*data)); | |
902 | data->ret = ret; | |
9a0cec66 PB |
903 | |
904 | if (need_drain) { | |
905 | bdrv_drained_begin(bs); | |
906 | } | |
5a7e7a0b | 907 | block_job_defer_to_main_loop(&s->common, mirror_exit, data); |
893f7eba PB |
908 | } |
909 | ||
d63ffd87 PB |
910 | static void mirror_complete(BlockJob *job, Error **errp) |
911 | { | |
912 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | |
4ef85a9c | 913 | BlockDriverState *target; |
274fccee | 914 | |
274fccee | 915 | target = blk_bs(s->target); |
d63ffd87 | 916 | |
d63ffd87 | 917 | if (!s->synced) { |
9df229c3 AG |
918 | error_setg(errp, "The active block job '%s' cannot be completed", |
919 | job->id); | |
d63ffd87 PB |
920 | return; |
921 | } | |
922 | ||
274fccee HR |
923 | if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { |
924 | int ret; | |
925 | ||
926 | assert(!target->backing); | |
927 | ret = bdrv_open_backing_file(target, NULL, "backing", errp); | |
928 | if (ret < 0) { | |
929 | return; | |
930 | } | |
931 | } | |
932 | ||
15d67298 | 933 | /* block all operations on to_replace bs */ |
09158f00 | 934 | if (s->replaces) { |
5a7e7a0b SH |
935 | AioContext *replace_aio_context; |
936 | ||
e12f3784 | 937 | s->to_replace = bdrv_find_node(s->replaces); |
09158f00 | 938 | if (!s->to_replace) { |
e12f3784 | 939 | error_setg(errp, "Node name '%s' not found", s->replaces); |
09158f00 BC |
940 | return; |
941 | } | |
942 | ||
5a7e7a0b SH |
943 | replace_aio_context = bdrv_get_aio_context(s->to_replace); |
944 | aio_context_acquire(replace_aio_context); | |
945 | ||
4ef85a9c KW |
946 | /* TODO Translate this into permission system. Current definition of |
947 | * GRAPH_MOD would require to request it for the parents; they might | |
948 | * not even be BlockDriverStates, however, so a BdrvChild can't address | |
949 | * them. May need redefinition of GRAPH_MOD. */ | |
09158f00 BC |
950 | error_setg(&s->replace_blocker, |
951 | "block device is in use by block-job-complete"); | |
952 | bdrv_op_block_all(s->to_replace, s->replace_blocker); | |
953 | bdrv_ref(s->to_replace); | |
5a7e7a0b SH |
954 | |
955 | aio_context_release(replace_aio_context); | |
09158f00 BC |
956 | } |
957 | ||
d63ffd87 | 958 | s->should_complete = true; |
751ebd76 | 959 | block_job_enter(&s->common); |
d63ffd87 PB |
960 | } |
961 | ||
bae8196d | 962 | static void mirror_pause(BlockJob *job) |
565ac01f SH |
963 | { |
964 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | |
965 | ||
bae8196d | 966 | mirror_wait_for_all_io(s); |
565ac01f SH |
967 | } |
968 | ||
969 | static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context) | |
970 | { | |
971 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | |
972 | ||
973 | blk_set_aio_context(s->target, new_context); | |
974 | } | |
975 | ||
bae8196d PB |
976 | static void mirror_drain(BlockJob *job) |
977 | { | |
978 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | |
979 | ||
980 | /* Need to keep a reference in case blk_drain triggers execution | |
981 | * of mirror_complete... | |
982 | */ | |
983 | if (s->target) { | |
984 | BlockBackend *target = s->target; | |
985 | blk_ref(target); | |
986 | blk_drain(target); | |
987 | blk_unref(target); | |
988 | } | |
989 | } | |
990 | ||
3fc4b10a | 991 | static const BlockJobDriver mirror_job_driver = { |
565ac01f SH |
992 | .instance_size = sizeof(MirrorBlockJob), |
993 | .job_type = BLOCK_JOB_TYPE_MIRROR, | |
a7815a76 | 994 | .start = mirror_run, |
565ac01f SH |
995 | .complete = mirror_complete, |
996 | .pause = mirror_pause, | |
997 | .attached_aio_context = mirror_attached_aio_context, | |
bae8196d | 998 | .drain = mirror_drain, |
893f7eba PB |
999 | }; |
1000 | ||
03544a6e | 1001 | static const BlockJobDriver commit_active_job_driver = { |
565ac01f SH |
1002 | .instance_size = sizeof(MirrorBlockJob), |
1003 | .job_type = BLOCK_JOB_TYPE_COMMIT, | |
a7815a76 | 1004 | .start = mirror_run, |
565ac01f SH |
1005 | .complete = mirror_complete, |
1006 | .pause = mirror_pause, | |
1007 | .attached_aio_context = mirror_attached_aio_context, | |
bae8196d | 1008 | .drain = mirror_drain, |
03544a6e FZ |
1009 | }; |
1010 | ||
4ef85a9c KW |
1011 | static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, |
1012 | uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) | |
1013 | { | |
1014 | return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); | |
1015 | } | |
1016 | ||
1017 | static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, | |
1018 | uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) | |
1019 | { | |
1020 | return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); | |
1021 | } | |
1022 | ||
1023 | static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) | |
1024 | { | |
ce960aa9 VSO |
1025 | if (bs->backing == NULL) { |
1026 | /* we can be here after failed bdrv_append in mirror_start_job */ | |
1027 | return 0; | |
1028 | } | |
4ef85a9c KW |
1029 | return bdrv_co_flush(bs->backing->bs); |
1030 | } | |
1031 | ||
4ef85a9c | 1032 | static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, |
f5a5ca79 | 1033 | int64_t offset, int bytes, BdrvRequestFlags flags) |
4ef85a9c | 1034 | { |
f5a5ca79 | 1035 | return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); |
4ef85a9c KW |
1036 | } |
1037 | ||
1038 | static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, | |
f5a5ca79 | 1039 | int64_t offset, int bytes) |
4ef85a9c | 1040 | { |
f5a5ca79 | 1041 | return bdrv_co_pdiscard(bs->backing->bs, offset, bytes); |
4ef85a9c KW |
1042 | } |
1043 | ||
fd4a6493 KW |
1044 | static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts) |
1045 | { | |
18775ff3 VSO |
1046 | if (bs->backing == NULL) { |
1047 | /* we can be here after failed bdrv_attach_child in | |
1048 | * bdrv_set_backing_hd */ | |
1049 | return; | |
1050 | } | |
fd4a6493 KW |
1051 | bdrv_refresh_filename(bs->backing->bs); |
1052 | pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), | |
1053 | bs->backing->bs->filename); | |
1054 | } | |
1055 | ||
4ef85a9c KW |
1056 | static void bdrv_mirror_top_close(BlockDriverState *bs) |
1057 | { | |
1058 | } | |
1059 | ||
1060 | static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, | |
1061 | const BdrvChildRole *role, | |
e0995dc3 | 1062 | BlockReopenQueue *reopen_queue, |
4ef85a9c KW |
1063 | uint64_t perm, uint64_t shared, |
1064 | uint64_t *nperm, uint64_t *nshared) | |
1065 | { | |
1066 | /* Must be able to forward guest writes to the real image */ | |
1067 | *nperm = 0; | |
1068 | if (perm & BLK_PERM_WRITE) { | |
1069 | *nperm |= BLK_PERM_WRITE; | |
1070 | } | |
1071 | ||
1072 | *nshared = BLK_PERM_ALL; | |
1073 | } | |
1074 | ||
1075 | /* Dummy node that provides consistent read to its users without requiring it | |
1076 | * from its backing file and that allows writes on the backing file chain. */ | |
1077 | static BlockDriver bdrv_mirror_top = { | |
1078 | .format_name = "mirror_top", | |
1079 | .bdrv_co_preadv = bdrv_mirror_top_preadv, | |
1080 | .bdrv_co_pwritev = bdrv_mirror_top_pwritev, | |
1081 | .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, | |
1082 | .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, | |
1083 | .bdrv_co_flush = bdrv_mirror_top_flush, | |
3e4d0e72 | 1084 | .bdrv_co_block_status = bdrv_co_block_status_from_backing, |
fd4a6493 | 1085 | .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, |
4ef85a9c KW |
1086 | .bdrv_close = bdrv_mirror_top_close, |
1087 | .bdrv_child_perm = bdrv_mirror_top_child_perm, | |
1088 | }; | |
1089 | ||
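/* Common setup for mirror and active commit: insert the mirror_top filter
 * above @bs, create the block job on the filter node, attach @target via
 * its own BlockBackend and start the job. */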
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}