/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by first
     * failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock in BlockCopyState.
     */
    CoQueue wait_queue; /* coroutines blocked on this task */
    /*
     * Only protect the case of parallel read while updating @bytes
     * value in block_copy_task_shrink().
     */
    int64_t bytes;
    QLIST_ENTRY(BlockCopyTask) list;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by block-copy user and user is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

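/*
 * Note (added for clarity): the condition above is the standard half-open
 * interval overlap test: [offset, offset + bytes) intersects
 * [t->offset, t->offset + t->bytes) iff each range starts before the other
 * ends. For example, a request covering 0..128K conflicts with a task at
 * 64K..192K, but not with one starting exactly at 128K.
 */
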
/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first found intersecting task to finish and return true.
 *
 * Called with lock held. May temporarily release the lock.
 * A return value of 0 proves that the lock was NOT released.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, &s->lock);

    return true;
}

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}

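/*
 * Worked example (illustrative, assuming a max_transfer of 2 MiB): with
 * cluster_size = 64 KiB and the defaults above, COPY_READ_WRITE yields
 * MIN(MAX(64 KiB, 1 MiB), 2 MiB) = 1 MiB per chunk, while COPY_RANGE_FULL
 * yields MIN(MAX(64 KiB, 16 MiB), 2 MiB) = 2 MiB, i.e. copy_range chunks
 * grow until capped by the device's max_transfer.
 */
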
/*
 * Search for the first dirty area in offset/bytes range and create task at
 * the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* region is dirty, so no existing tasks possible in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .method = s->method,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set dirty bits back and
 * wake up all tasks waiting for us (maybe some of them do not intersect
 * with the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    qemu_co_queue_restart_all(&task->wait_queue);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their behalf).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range enabled, start with COPY_RANGE_SMALL, until first
         * successful copy_range (look at block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

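/*
 * Worked example (illustrative): for a target that reports
 * bdi.cluster_size = 128 KiB, the function returns
 * MAX(64 KiB, 128 KiB) = 128 KiB, so the copy granularity matches the
 * target's allocation unit; a target that reports no cluster size at all
 * ends up with the 64 KiB default above.
 */
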
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start point in
     * time, and the target is going to be read by somebody (for example,
     * used as NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data that has already been updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}

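/*
 * Usage sketch (illustrative, hypothetical caller): a backup-style job
 * would typically do something like
 *
 *     BlockCopyState *bcs = block_copy_state_new(source, target, NULL, errp);
 *     if (!bcs) {
 *         return;
 *     }
 *     block_copy_set_progress_meter(bcs, &job->progress);
 *     ...start block_copy()/block_copy_async() calls...
 *     block_copy_state_free(bcs);
 *
 * where @source and @target are BdrvChild objects whose permissions the
 * caller has already set up, as the contract above requires, and
 * job->progress is a placeholder for caller-side state.
 */
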
/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL directly run the task, otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
 * s->len only to cover last cluster when s->len is not aligned to clusters.
 *
 * No sync here: neither bitmap nor intersecting request handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           BlockCopyMethod *method,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of failed copy_range request above, we may proceed with
         * buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails)
         * is handled by setting large copy_size only after first successful
         * copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

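/*
 * The @method adjustment above forms a small state machine (a sketch derived
 * from the code, not stated elsewhere): COPY_RANGE_SMALL is promoted to
 * COPY_RANGE_FULL on the first successful copy_range, and any failed
 * copy_range demotes the task to COPY_READ_WRITE for its remainder;
 * block_copy_task_entry() then folds the result back into s->method so that
 * later tasks start out in the right state.
 */
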
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->bytes);
        }
    }
    co_put_to_shres(s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

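/*
 * Worked example (illustrative): with a 64 KiB cluster size, a status run
 * of num = 200 KiB in the middle of the image is rounded down to 192 KiB
 * (three whole clusters), while the same run ending exactly at s->len is
 * rounded up so that the final partial cluster is still covered.
 */
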
/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        qemu_co_mutex_lock(&s->lock);
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        if (s->progress) {
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
        }
        qemu_co_mutex_unlock(&s->lock);
    }

    *count = bytes;
    return ret;
}

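/*
 * Usage sketch (illustrative, hypothetical caller): a sync=top job that has
 * set skip_unallocated would prime the bitmap with a loop along these lines,
 * run from coroutine context:
 *
 *     for (offset = 0; offset < len; offset += count) {
 *         ret = block_copy_reset_unallocated(s, offset, &count);
 *         if (ret < 0) {
 *             break;
 *         }
 *     }
 *
 * clearing copy_bitmap bits for unallocated regions before any copying
 * starts (compare the backup job's initialization code).
 */
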
/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in @offset/@bytes range.
 * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
 * clusters found and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * block_copy() user is responsible for keeping source and target in same
     * aio context
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->bytes);

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for real reason, let's return first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed it will help
 * us. If they fail, we will retry not-copied regions. So, if we return error,
 * it means that some I/O operation failed in context of _this_ block_copy call,
 * not some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = block_copy_wait_one(s, call_state->offset,
                                          call_state->bytes);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * block_copy_wait_one return value 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}

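/*
 * Usage sketch (illustrative, hypothetical caller): a synchronous caller
 * running in coroutine context copies an aligned region in one call:
 *
 *     ret = block_copy(bcs, 0, QEMU_ALIGN_UP(job_len, cluster_size), false);
 *
 * where job_len is a placeholder for the caller's region length; offset and
 * bytes must be cluster-aligned, as asserted in block_copy_dirty_clusters().
 */
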
static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

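/*
 * Usage sketch (illustrative, hypothetical caller): a user of the async API
 * would typically pass a completion callback that wakes its own coroutine or
 * sets a flag, then query the result:
 *
 *     BlockCopyCallState *cs;
 *
 *     cs = block_copy_async(bcs, 0, bytes, BLOCK_COPY_MAX_WORKERS, 0,
 *                           my_copy_done_cb, my_opaque);
 *     ...wait until block_copy_call_finished(cs)...
 *     ret = block_copy_call_status(cs, &error_is_read);
 *     block_copy_call_free(cs);
 *
 * where my_copy_done_cb and my_opaque are placeholders for caller-side state.
 */
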
void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling and finishing are racy.
 * User can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it's good to kick all call states from here, but it should be
     * done only from a coroutine, so as not to crash if the s->calls list
     * changes while entering one call. So for now, the only user of this
     * function kicks its single call_state by hand.
     */
}
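
/*
 * Worked example (illustrative, assuming the usual QEMU ratelimit
 * semantics): block_copy_set_speed(s, 100 * MiB) with the 100 ms
 * BLOCK_COPY_SLICE_TIME above means the limiter admits roughly 10 MiB of
 * task bytes per slice; once a slice's budget is exhausted,
 * block_copy_dirty_clusters() sleeps on call_state->sleep until the delay
 * elapses or block_copy_kick() wakes it early.
 */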