/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
#include "qemu/ratelimit.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;
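
/*
 * Illustrative sketch (not part of the original source): how @method is
 * expected to evolve over a copy, based on the logic in
 * block_copy_set_copy_opts() and block_copy_do_copy() below.
 *
 *   use_copy_range=true  -> COPY_RANGE_SMALL
 *       first bdrv_co_copy_range() succeeds -> COPY_RANGE_FULL
 *       first bdrv_co_copy_range() fails    -> COPY_READ_WRITE
 *   use_copy_range=false -> COPY_READ_WRITE
 *   compress=true, or max_transfer < cluster_size -> COPY_READ_WRITE_CLUSTER
 *   block-status reports BDRV_BLOCK_ZERO -> COPY_WRITE_ZEROES (per task)
 */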

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution. */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by first
     * failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Generally, req is protected by the lock in BlockCopyState. Still,
     * req.offset is only set at task creation, so it may be read
     * concurrently afterwards. req.bytes is changed at most once, and only
     * a parallel read racing with the @bytes update in
     * block_copy_task_shrink() needs protection.
     */
    BlockReq req;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->req.offset + task->req.bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, and the user is responsible for
     * appropriate permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution.
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    BlockReqList reqs;
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}
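
/*
 * Worked example (illustrative, not from the original source): with
 * cluster_size = 64 KiB and max_transfer = 2 MiB,
 *   COPY_READ_WRITE / COPY_RANGE_SMALL -> MIN(MAX(64K, 1M), 2M)  = 1 MiB
 *   COPY_RANGE_FULL                    -> MIN(MAX(64K, 16M), 2M) = 2 MiB
 * so a successful copy_range enlarges chunks up to the device limit.
 */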

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks are possible in it. */
    assert(!reqlist_find_conflict(&s->reqs, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .method = s->method,
    };
    reqlist_init_req(&s->reqs, &task->req, offset, bytes);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set the dirty bits back and
 * wake up all tasks waiting for us (some of them may not intersect with the
 * shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->req.bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->req.bytes);

    task->s->in_flight_bytes -= task->req.bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->req.offset + new_bytes,
                          task->req.bytes - new_bytes);

    reqlist_shrink_req(&task->req, new_bytes);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->req.bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
                              task->req.bytes);
    }
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    reqlist_remove_req(&task->req);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their own).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (see block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
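
/*
 * Worked example (illustrative, not from the original source): a qcow2
 * target reporting bdi.cluster_size = 128 KiB yields MAX(64 KiB, 128 KiB) =
 * 128 KiB. A target whose driver does not implement bdrv_get_info()
 * (-ENOTSUP) and that has no backing file gets the 64 KiB default, plus the
 * warning above.
 */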

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start point in
     * time, and the target is going to be read by somebody (for example,
     * used as an NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data that has already been updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->reqs);
    QLIST_INIT(&s->calls);

    return s;
}
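
/*
 * Minimal usage sketch (illustrative only; the BdrvChild variables, progress
 * meter and error handling are hypothetical caller-side code, not part of
 * this file):
 *
 *     BlockCopyState *bcs;
 *
 *     bcs = block_copy_state_new(source_child, target_child, NULL, errp);
 *     if (!bcs) {
 *         return;   // *errp is set
 *     }
 *     block_copy_set_copy_opts(bcs, true, false);   // try copy_range
 *     block_copy_set_progress_meter(bcs, pm);
 *     ...
 *     block_copy_state_free(bcs);
 */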

/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL, run the task directly; otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->req.bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. The requested region may exceed s->len only
 * to cover the last cluster when s->len is not aligned to the cluster size.
 *
 * No synchronization here: no bitmap or intersecting-request handling, only
 * the copy itself.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                   BlockCopyMethod *method, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with
         * a buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care
         * too much. Moreover, the most likely case (copy_range is
         * unsupported for the configuration, so the very first copy_range
         * request fails) is handled by setting a large copy_size only after
         * the first successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}
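
/*
 * Illustrative trace of the in-out @method contract (not from the original
 * source): a task starts with *method == COPY_RANGE_SMALL; if
 * bdrv_co_copy_range() fails, *method becomes COPY_READ_WRITE and the same
 * call still completes through the bounce-buffer path, so the caller sees a
 * single successful copy plus an updated method for subsequent tasks.
 */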

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
                                 &error_is_read);
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->req.bytes);
        }
    }
    co_put_to_shres(s->mem, t->req.bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static coroutine_fn GRAPH_RDLOCK
int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
                            int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
                                     NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}
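
/*
 * Worked example of the alignment handling above (illustrative, not from the
 * original source): with cluster_size = 64 KiB and s->len = 10 MiB, a status
 * result of num = 100 KiB at offset 0 is rounded down to 64 KiB; only when
 * the chunk ends exactly at s->len is num rounded up, so that the final
 * partial cluster is still covered.
 */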

/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        /* protected in backup_run() */
        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
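
/*
 * Illustrative trace of the loop above (not from the original source), with
 * cluster_size = 64 KiB: bdrv_co_is_allocated() first reports 32 KiB
 * unallocated (ret = 0); total_count < cluster_size, so the loop continues.
 * The next call reports 96 KiB allocated (ret = 1), so total_count = 128 KiB
 * and *pnum = DIV_ROUND_UP(128K, 64K) = 2, returned as allocated because a
 * partial segment counts as allocated.
 */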

void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
{
    QEMU_LOCK_GUARD(&s->lock);

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    if (s->progress) {
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
                                                  int64_t offset,
                                                  int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        block_copy_reset(s, offset, bytes);
    }

    *count = bytes;
    return ret;
}
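
/*
 * Sketch of the sync=top pre-scan this supports (illustrative only; the loop
 * shape follows the skip_unallocated description above and is hypothetical
 * caller-side code, not a verbatim copy of any caller; it must run in
 * coroutine context, since block_copy_reset_unallocated() is coroutine_fn):
 *
 *     int64_t offset = 0, count;
 *     int64_t len = bdrv_dirty_bitmap_size(block_copy_dirty_bitmap(s));
 *
 *     block_copy_set_skip_unallocated(s, true);
 *     while (offset < len) {
 *         if (block_copy_reset_unallocated(s, offset, &count) < 0) {
 *             break;   // error
 *         }
 *         offset += count;
 *     }
 *     block_copy_set_skip_unallocated(s, false);
 */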

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->req.offset > offset) {
            trace_block_copy_skip_range(s, offset, task->req.offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->req.bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);

        trace_block_copy_process(s, task->req.offset);

        co_get_from_shres(s->mem, task->req.bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already
         * failed for a real reason; let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, they will
 * help us. If they fail, we will retry not-copied regions. So, if we return
 * an error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = reqlist_wait_one(&s->reqs, call_state->offset,
                                       call_state->bytes, &s->lock);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * reqlist_wait_one return value 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    GRAPH_RDLOCK_GUARD();
    block_copy_common(opaque);
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit, uint64_t timeout_ns,
                            BlockCopyAsyncCallbackFunc cb,
                            void *cb_opaque)
{
    int ret;
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
        .cb = cb,
        .cb_opaque = cb_opaque,
    };

    ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns,
                          g_free);
    if (ret < 0) {
        assert(ret == -ETIMEDOUT);
        block_copy_call_cancel(call_state);
        /* call_state will be freed by the running coroutine. */
        return ret;
    }

    ret = call_state->ret;
    g_free(call_state);

    return ret;
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
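
/*
 * Async usage sketch (illustrative only; the callback and polling shape are
 * hypothetical caller-side code, not part of this file):
 *
 *     static void copy_done_cb(void *opaque)
 *     {
 *         // runs once the copy finishes; query block_copy_call_status()
 *     }
 *
 *     BlockCopyCallState *cs;
 *
 *     cs = block_copy_async(bcs, 0, len, BLOCK_COPY_MAX_WORKERS,
 *                           0,  // no max_chunk limit
 *                           copy_done_cb, NULL);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *         block_copy_call_free(cs);
 *     }
 */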

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling and finishing are racy.
 * The user can cancel a block-copy that has already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it would be good to kick all call states from here, but that
     * should be done only from a coroutine, to not crash if the s->calls
     * list changes while entering one call. So for now, the only user of
     * this function kicks its only call_state by hand.
     */
}