/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* IN parameters. Initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;

    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /* State */
    int ret;
    bool finished;
    QemuCoSleepState *sleep_state;
    bool cancelled;

    /* OUT parameters */
    bool error_is_read;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    int64_t bytes;
    bool zeroes;
    QLIST_ENTRY(BlockCopyTask) list;
    CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;
    BdrvDirtyBitmap *copy_bitmap;
    int64_t in_flight_bytes;
    int64_t cluster_size;
    bool use_copy_range;
    int64_t copy_size;
    uint64_t len;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;

    BdrvRequestFlags write_flags;

    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated;

    ProgressMeter *progress;

    SharedResource *mem;

    uint64_t speed;
    RateLimit rate_limit;
} BlockCopyState;

static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

/*
 * If there are no intersecting tasks, return false. Otherwise, wait for the
 * first intersecting task found to finish, and return true.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, NULL);

    return true;
}

/*
 * Search for the first dirty area in the offset/bytes range and create a
 * task at its beginning.
 */
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                             BlockCopyCallState *call_state,
                                             int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);

    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks can be in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set the dirty bits back and
 * wake up all tasks waiting for us (some of them may no longer intersect with
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    qemu_co_queue_restart_all(&task->wait_queue);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size, bool use_copy_range,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
    };

    if (block_copy_max_transfer(source, target) < cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their own).
         */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else {
        /*
         * We enable copy-range, but keep a small copy_size until the first
         * successful copy_range (see block_copy_do_copy).
         */
        s->use_copy_range = use_copy_range;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }

    ratelimit_init(&s->rate_limit);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}
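
/*
 * A minimal usage sketch, not part of this file: the BdrvChild pair, its
 * permissions and the chosen cluster size are the caller's business, as
 * noted in BlockCopyState above. The names and values below are
 * illustrative assumptions only:
 *
 *     BlockCopyState *bcs = block_copy_state_new(source, target, 64 * KiB,
 *                                                true, 0, errp);
 *     if (!bcs) {
 *         return;                             // errp is already set
 *     }
 *     block_copy_set_progress_meter(bcs, pm); // optional
 *     block_copy_set_speed(bcs, 10 * MiB);    // optional rate limit
 *     ...
 *     block_copy_state_free(bcs);             // NULL-safe
 */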

void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task.
 *
 * If @pool is NULL, run the task directly; otherwise schedule it into the
 * pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. The requested region is allowed to exceed
 * s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither bitmap nor intersecting request handling,
 * only the copy itself.
 *
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           bool zeroes, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    if (zeroes) {
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;
    }

    if (s->use_copy_range) {
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret < 0) {
            trace_block_copy_copy_range_fail(s, offset, ret);
            s->use_copy_range = false;
            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
            /* Fall back to read+write with an allocated buffer */
        } else {
            if (s->use_copy_range) {
                /*
                 * Successful copy-range. Now increase copy_size. copy_range
                 * does not respect max_transfer (it's a TODO), so we factor
                 * that in here.
                 *
                 * Note: we double-check s->use_copy_range for the case when a
                 * parallel block-copy request unset it during the previous
                 * bdrv_co_copy_range call.
                 */
                s->copy_size =
                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
                                                                    s->target),
                                            s->cluster_size));
            }
            goto out;
        }
    }

    /*
     * If the copy_range request above failed, we may proceed with a buffered
     * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
     * be properly limited, so don't worry too much. Moreover, the most likely
     * case (copy_range is unsupported for the configuration, so the very
     * first copy_range request fails) is handled by setting a large copy_size
     * only after the first successful copy_range.
     */

    bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
    if (ret < 0) {
        trace_block_copy_read_fail(s, offset, ret);
        *error_is_read = true;
        goto out;
    }

    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                         s->write_flags);
    if (ret < 0) {
        trace_block_copy_write_fail(s, offset, ret);
        *error_is_read = false;
        goto out;
    }

out:
    qemu_vfree(bounce_buffer);

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    bool error_is_read = false;
    int ret;

    ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
                             &error_is_read);
    if (ret < 0 && !t->call_state->ret) {
        t->call_state->ret = ret;
        t->call_state->error_is_read = error_is_read;
    } else {
        progress_work_done(t->s->progress, t->bytes);
    }
    co_put_to_shres(t->s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (s->skip_unallocated) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at @offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }

    *count = bytes;
    return ret;
}
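
/*
 * A sketch of the sync=top pre-scan that skip_unallocated (see
 * BlockCopyState) is designed for, modeled on a backup-style caller; the
 * @len bound and the error path are illustrative assumptions:
 *
 *     int64_t offset = 0, count;
 *
 *     block_copy_set_skip_unallocated(bcs, true);
 *     while (offset < len) {
 *         int64_t ret = block_copy_reset_unallocated(bcs, offset, &count);
 *         if (ret < 0) {
 *             return ret;      // bdrv_is_allocated() failed
 *         }
 *         offset += count;     // count is set on both the 0 and 1 paths
 *     }
 *     block_copy_set_skip_unallocated(bcs, false);
 */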

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping the source and target
     * in the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        task->zeroes = ret & BDRV_BLOCK_ZERO;

        if (s->speed) {
            if (!call_state->ignore_ratelimit) {
                uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
                if (ns > 0) {
                    block_copy_task_end(task, -EAGAIN);
                    g_free(task);
                    qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, ns,
                                              &call_state->sleep_state);
                    continue;
                }
            }

            ratelimit_calculate_delay(&s->rate_limit, task->bytes);
        }

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task has already
         * failed for a real reason; let's return the first failure.
         * Still, assert that we don't overwrite a failure with success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    if (call_state->sleep_state) {
        qemu_co_sleep_wake(call_state->sleep_state);
    }
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it helps
 * us. If they fail, we retry not-yet-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not of some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;

    QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !call_state->cancelled) {
            ret = block_copy_wait_one(call_state->s, call_state->offset,
                                      call_state->bytes);
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !call_state->cancelled);

    call_state->finished = true;

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    QLIST_REMOVE(call_state, list);

    return ret;
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}
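
/*
 * A sketch of a synchronous call, assuming coroutine context and a
 * cluster-aligned range; @bcs, the offset and the length are illustrative:
 *
 *     int ret = block_copy(bcs, 0, 1 * MiB, false);
 *     if (ret < 0) {
 *         // I/O failed in _this_ call, see block_copy_common() above
 *     }
 */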

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
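
/*
 * A sketch of the async lifecycle using the helpers below; the callback,
 * its opaque and the chosen limits are illustrative assumptions:
 *
 *     BlockCopyCallState *cs;
 *
 *     cs = block_copy_async(bcs, 0, 1 * MiB, BLOCK_COPY_MAX_WORKERS, 0,
 *                           my_cb, my_opaque);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *
 *         block_copy_call_free(cs);   // only valid once finished
 *     }
 */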

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(call_state->finished);
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return call_state->finished;
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return call_state->cancelled;
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(call_state->finished);
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    call_state->cancelled = true;
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    s->skip_unallocated = skip;
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    s->speed = speed;
    if (speed > 0) {
        ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
    }

    /*
     * Note: it would be good to kick all call states from here, but that
     * should be done only from a coroutine, to avoid a crash if s->calls
     * changes while entering one call. So for now, the only user of this
     * function kicks its only call_state by hand.
     */
}
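
/*
 * For reference, the kick-all loop described in the note above might look
 * like this if it were ever run from coroutine context (a sketch, not code
 * that exists today):
 *
 *     BlockCopyCallState *call_state;
 *
 *     QLIST_FOREACH(call_state, &s->calls, list) {
 *         block_copy_kick(call_state);
 *     }
 */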