/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)
typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL,
    COPY_WRITE_ZEROES,
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);
typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by first
     * failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without mutex.
     */
    int ret;
} BlockCopyCallState;
typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * aio_task_pool_start_task() call.
     */
    BlockCopyMethod method;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock in BlockCopyState.
     */
    CoQueue wait_queue; /* coroutines blocked on this task */
    /*
     * Only protect the case of parallel read while updating @bytes
     * value in block_copy_task_shrink().
     */
    int64_t bytes;
    QLIST_ENTRY(BlockCopyTask) list;
} BlockCopyTask;
static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}
typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    int64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;
/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}
/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first found intersecting task to finish and return true.
 *
 * Called with lock held. May temporarily release the lock.
 * Return value of 0 proves that lock was NOT released.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, &s->lock);

    return true;
}
/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}
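/*
 * Worked example (illustration only, not from the original source, assuming
 * the defaults above): with the 64 KiB default cluster size and a large
 * max_transfer, COPY_READ_WRITE/COPY_RANGE_SMALL chunks come out to
 * MAX(64 KiB, 1 MiB) = 1 MiB, while COPY_RANGE_FULL chunks grow to
 * MAX(64 KiB, 16 MiB) = 16 MiB. A smaller max_transfer reported by the
 * source or target caps both values.
 */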
/*
 * Search for the first dirty area in offset/bytes range and create task at
 * the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existent tasks possible in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .method = s->method,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}
/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set dirty bits back and
 * wake up all tasks waiting for us (some of them may not intersect with the
 * shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    qemu_co_queue_restart_all(&task->wait_queue);
}
void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}
static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}
void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their behalf).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (see block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}
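/*
 * Summary of the resulting method (illustration only, derived from the logic
 * above): compressed writes and a max_transfer below the cluster size both
 * force COPY_READ_WRITE_CLUSTER; otherwise copy_range starts in
 * COPY_RANGE_SMALL and plain buffered copying uses COPY_READ_WRITE.
 */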
static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
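/*
 * Worked example (illustration only, not from the original file): with the
 * 64 KiB BLOCK_COPY_CLUSTER_SIZE_DEFAULT, a target whose driver reports a
 * 128 KiB cluster yields MAX(64 KiB, 128 KiB) = 128 KiB, while a target
 * reporting 4 KiB clusters still gets the 64 KiB default.
 */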
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     Error **errp)
{
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    /*
     * If source is in the backing chain of target, assume that target is
     * going to be used for "image fleecing", i.e. it should represent a kind
     * of snapshot of source at backup-start point in time. And target is
     * going to be read by somebody (for example, used as NBD export) during
     * the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from target;
     * otherwise a read from target may occasionally return data already
     * updated by the copy operation.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                            block_copy_max_transfer(source, target),
                            cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}
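/*
 * Typical call sequence (a hedged sketch for orientation, not code from this
 * file; "source_child", "target_child", "job" and "speed" are hypothetical
 * caller names; block/backup.c is the real user of this API):
 *
 *     BlockCopyState *bcs = block_copy_state_new(source_child, target_child,
 *                                                errp);
 *     if (!bcs) {
 *         return;
 *     }
 *     block_copy_set_progress_meter(bcs, &job->progress);
 *     block_copy_set_speed(bcs, speed);
 *     ...
 *     ret = block_copy(bcs, offset, bytes, false);   // from a coroutine
 *     ...
 *     block_copy_state_free(bcs);
 */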
/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}
/*
 * Takes ownership of @task
 *
 * If pool is NULL directly run the task, otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}
/*
 * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
 * s->len only to cover last cluster when s->len is not aligned to clusters.
 *
 * No sync here: neither bitmap nor intersecting-request handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           BlockCopyMethod *method,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with a
         * buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover, the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails)
         * is handled by setting the large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->bytes);
        }
    }
    co_put_to_shres(s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}
/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative errno on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        qemu_co_mutex_lock(&s->lock);
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        if (s->progress) {
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
        }
        qemu_co_mutex_unlock(&s->lock);
    }

    *count = bytes;
    return ret;
}
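/*
 * Illustrative caller pattern (a sketch, not code from this file): a sync=top
 * job would typically scan the whole source before copying, e.g.
 *
 *     for (offset = 0; offset < len; offset += count) {
 *         ret = block_copy_reset_unallocated(bcs, offset, &count);
 *         if (ret < 0) {
 *             break;
 *         }
 *     }
 *
 * where "bcs", "len" and "count" are the caller's variables.
 */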
/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * block_copy() user is responsible for keeping source and target in same
     * aio context
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->bytes);

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for a real reason; let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}
void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}
/*
 * block_copy_common
 *
 * Copy requested region, according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed it will help
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = block_copy_wait_one(s, call_state->offset,
                                          call_state->bytes);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * block_copy_wait_one return value 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}
static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}
BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
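/*
 * Illustrative async usage (a sketch, not code from this file; "job_cb" and
 * "job" are hypothetical caller names):
 *
 *     call_state = block_copy_async(bcs, offset, bytes,
 *                                   BLOCK_COPY_MAX_WORKERS, 0,
 *                                   job_cb, job);
 *     ...
 *     if (!block_copy_call_finished(call_state)) {
 *         block_copy_call_cancel(call_state);
 *     }
 *     ... wait for the callback / finished flag, then:
 *     ret = block_copy_call_status(call_state, &error_is_read);
 *     block_copy_call_free(call_state);
 */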
void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}
/*
 * Note that cancelling and finishing are racy.
 * User can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}
int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}
void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it's good to kick all call states from here, but it should be done
     * only from a coroutine, to not crash if s->calls list changed while
     * entering one call. So for now, the only user of this function kicks its
     * only one call_state by hand.
     */
}