/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t bytes_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

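/*
 * Copy-on-write core of the job: copy every not-yet-copied cluster touched
 * by [sector_num, sector_num + nb_sectors) from the source to the target,
 * using job->done_bitmap to skip clusters that were already copied.  Runs
 * both from the main copy loop and from the before-write notifier
 * (is_write_notifier distinguishes the two callers).
 */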
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end; /* clusters */
    int n; /* bytes */

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start * job->cluster_size,
                              sector_num * BDRV_SECTOR_SIZE,
                              nb_sectors * BDRV_SECTOR_SIZE);

    wait_for_overlapping_requests(job, start * job->cluster_size,
                                  end * job->cluster_size);
    cow_request_begin(&cow_request, job, start * job->cluster_size,
                      end * job->cluster_size);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start * job->cluster_size);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start * job->cluster_size);

        n = MIN(job->cluster_size,
                job->common.len - start * job->cluster_size);

        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start * job->cluster_size,
                            bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start * job->cluster_size, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start * job->cluster_size,
                                 bounce_qiov.size, &bounce_qiov,
                                 job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start * job->cluster_size, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->bytes_read += n;
        job->common.offset += n;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num * BDRV_SECTOR_SIZE,
                               nb_sectors * BDRV_SECTOR_SIZE, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

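/*
 * Before-write notifier: invoked for every guest write to the source device
 * while the job runs.  Copies the affected clusters to the target first, so
 * the backup keeps its point-in-time contents.
 */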
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert(req->bs == blk_bs(job->common.blk));
    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}

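/*
 * The sync bitmap was frozen behind a successor when the job was created.
 * On failure or cancellation, merge the successor back into the parent so
 * no dirty bits are lost; on success, drop the old bitmap and promote the
 * successor in its place.
 */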
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

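/*
 * Clear the done bitmap so that every cluster is copied out again on the
 * next guest write.  Only valid for sync=none jobs, which exist purely to
 * service copy-on-write requests.
 */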
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
    bitmap_zero(backup_job->done_bitmap, len);
}

void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
                                          uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t offset, uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

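/* Map an I/O error to the action configured for the failing side:
 * on_source_error for reads, on_target_error for writes. */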
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    block_job_completed(job, data->ret);
    g_free(data);
}

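/*
 * Yield to the main loop, sleeping long enough to honour the configured
 * rate limit, and report whether the job has been cancelled.
 */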
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (without this, the VM does not reboot)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->bytes_read);
        job->bytes_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

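/*
 * sync=incremental: iterate over the dirty bitmap and copy only the
 * clusters it marks, publishing fake progress for the skipped ones so the
 * progress meter still reaches 100%.
 */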
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BdrvDirtyBitmapIter *dbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);

    /* Find the next dirty sector(s) */
    while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    goto out;
                }
                ret = backup_do_cow(job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto out;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(dbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

out:
    bdrv_dirty_iter_free(dbi);
    return ret;
}

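/*
 * Coroutine entry point of the job.  Installs the before-write notifier
 * and then copies data according to the sync mode: none (only service CoW
 * requests until cancelled), incremental (follow the dirty bitmap), or
 * full/top (scan every cluster, with top skipping clusters that are not
 * allocated in the topmost image).
 */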
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            int alloced = 0;

            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            if (alloced < 0) {
                ret = alloced;
            } else {
                ret = backup_do_cow(job, start * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
            }
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

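/* Callbacks wiring this job into the generic block-job machinery. */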
static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .start                  = backup_run,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .clean                  = backup_clean,
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};

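/*
 * Validate the source/target pair and the requested sync mode, create the
 * job (without starting it), and pick a cluster size: at least
 * BACKUP_CLUSTER_SIZE_DEFAULT, or the target's own cluster size if that is
 * larger, so copy-on-write on the target cannot corrupt the backup.
 *
 * A minimal caller sketch (hypothetical names and argument values; the
 * real callers live in blockdev.c):
 *
 *     BlockJob *bjob = backup_job_create("backup0", bs, target_bs, 0,
 *                                        MIRROR_SYNC_MODE_FULL, NULL, false,
 *                                        BLOCKDEV_ON_ERROR_REPORT,
 *                                        BLOCKDEV_ON_ERROR_REPORT,
 *                                        BLOCK_JOB_DEFAULT, NULL, NULL,
 *                                        NULL, &local_err);
 *     if (bjob) {
 *         block_job_start(bjob);
 *     }
 */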
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                            BlockDriverState *target, int64_t speed,
                            MirrorSyncMode sync_mode,
                            BdrvDirtyBitmap *sync_bitmap,
                            bool compress,
                            BlockdevOnError on_source_error,
                            BlockdevOnError on_target_error,
                            int creation_flags,
                            BlockCompletionFunc *cb, void *opaque,
                            BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->common.len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        error_report("WARNING: The target block device doesn't provide "
                     "information about the block size and it doesn't have a "
                     "backing file. The default block size of %u bytes is "
                     "used. If the actual block size of the target exceeds "
                     "this default, the backup may be unusable",
                     BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->common.len = len;
    block_job_txn_add_job(txn, &job->common);

    return &job->common;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common);
        block_job_early_fail(&job->common);
    }

    return NULL;
}