/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;

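/* A MirrorOp tracks one in-flight copy: the source sector range it covers
 * and the iovec of granularity-sized buffer chunks backing the I/O. */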
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

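/* Common completion bookkeeping: return the op's buffer chunks to the free
 * list, clear its chunks in the in-flight bitmap, and re-enter the job
 * coroutine if it yielded while waiting for I/O or for free chunks. */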
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

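/* A successful read chains directly into a write of the same range to the
 * target; on error the range is marked dirty again so that a later
 * iteration retries it. */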
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

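/* Example: with a 32k granularity and a 64k target cluster, a request for
 * sectors [64, 192) whose boundary chunks still need COW is widened by
 * mirror_cow_align below to the enclosing cluster range [0, 256), and the
 * return value of 64 reports how many extra tail sectors were pulled in. */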
/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(s->target, *sector_num, *nb_sectors,
                               &align_sector_num, &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

/* Submit async read while handling COW.
 * Returns: nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockDriverState *source = s->common.bs;
    int sectors_per_chunk, nb_chunks;
    int ret = nb_sectors;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(nb_sectors % sectors_per_chunk));
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = nb_sectors / sectors_per_chunk;

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
    return ret;
}

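/* Zero and discard requests reuse the MirrorOp bookkeeping, but carry no
 * buffer: the qiov stays empty, so mirror_iteration_done has no chunks to
 * return to the free list. */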
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    } else {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int64_t sector_num;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            if (nb_chunks > 0) {
                break;
            }
            trace_mirror_yield_in_flight(s, next_sector, s->in_flight);
            s->waiting_for_io = true;
            qemu_coroutine_yield();
            s->waiting_for_io = false;
            /* Now retry. */
        } else {
            hbitmap_next = hbitmap_iter_next(&s->hbi);
            assert(hbitmap_next == next_sector);
            nb_chunks++;
        }
    }

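    /* At this point [sector_num, sector_num + nb_chunks * sectors_per_chunk)
     * is a run of dirty chunks with no I/O in flight; the loop below copies
     * it in block-status-sized pieces, zeroing or discarding ranges that
     * contain no data instead of reading them. */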
    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_to_clusters(s->target, sector_num, io_sectors,
                                   &target_sector_num, &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= io_sectors / sectors_per_chunk;
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}

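/* Carve s->buf into granularity-sized chunks and thread them onto the
 * buf_free list; mirror_start_job rounds buf_size up to a multiple of the
 * granularity, so the buffer splits into whole chunks. */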
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }
}

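/* Job completion runs in the main loop via block_job_defer_to_main_loop():
 * on success with a completion request, the target is swapped into the
 * graph in place of the replaced node before all blockers and references
 * taken at job start are released. */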
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->common.bs;

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        /* This was checked in mirror_start_job(), but meanwhile one of the
         * nodes could have been newly attached to a BlockBackend. */
        if (to_replace->blk && s->target->blk) {
            error_report("block job: Can't create node with two BlockBackends");
            data->ret = -EINVAL;
            goto out;
        }

        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_replace_in_backing_chain(to_replace, s->target);
    }

out:
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(s->target, s->common.blocker);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(src);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(s->target, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !s->target->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(s->common.bs->bl.max_iov, s->target->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(s->target);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

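    /* Second part: transfer dirty chunks until the source is clean, then
     * keep mirroring new guest writes until the job is cancelled or
     * completion is requested via block-job-complete. */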
    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                s->waiting_for_io = true;
                qemu_coroutine_yield();
                s->waiting_for_io = false;
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    if (s->target->blk) {
        blk_iostatus_disable(s->target->blk);
    }

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(s->common.bs);
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (s->target->blk) {
        blk_iostatus_reset(s->target->blk);
    }
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, "backing", &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

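/* The mirror and active-commit jobs share all callbacks; they differ only
 * in the job type they report and in how mirror_start_job is invoked. */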
static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;
    BlockDriverState *replaced_bs;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

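    /* The granularity must be a power of two so that sector-to-chunk and
     * chunk-to-bitmap arithmetic stays exact. */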
    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* We can't support this case as long as the block layer can't handle
     * multiple BlockBackends per BlockDriverState. */
    if (replaces) {
        replaced_bs = bdrv_lookup_bs(replaces, replaces, errp);
        if (replaced_bs == NULL) {
            return;
        }
    } else {
        replaced_bs = bs;
    }
    if (replaced_bs->blk && target->blk) {
        error_setg(errp, "Can't create node with two BlockBackends");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(s->target, s->common.blocker);

    bdrv_set_enable_write_cache(s->target, true);
    if (s->target->blk) {
        blk_set_on_error(s->target->blk, on_target_error, on_target_error);
        blk_iostatus_enable(s->target->blk);
    }
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

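/* Active commit drives the mirror machinery with the backing file as the
 * target: the base is reopened read-write and grown if the top image is
 * larger, then mirrored until both images are in sync. */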
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}