/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

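/* State for a single backup job.  done_bitmap tracks which clusters have
 * already been copied to the target; flush_rwlock lets backup_run() wait
 * for all in-flight backup_do_cow() calls to drain before completing. */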
typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

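/* Copy the clusters covering [sector_num, sector_num + nb_sectors) from the
 * source to the target, skipping clusters already marked in done_bitmap.
 * Overlapping requests are serialised via wait_for_overlapping_requests(),
 * and all-zero clusters are written out with bdrv_co_write_zeroes() rather
 * than a data write.  Called both from the main copy loops and, with
 * is_write_notifier set, from the before-write notifier. */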
static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BackupBlockJob *job = (BackupBlockJob *)bs->job;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                               start * sectors_per_cluster,
                                               n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

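/* Before-write notifier: copy the old contents of the about-to-be-written
 * region to the target before the guest write is allowed to proceed.  The
 * tracked request is expected to be sector-aligned (see the asserts). */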
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, sector_num, nb_sectors, NULL, true);
}

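/* Convert the byte-per-second speed limit into the sector-based rate limit
 * that yield_and_check() charges sectors_read against. */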
static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_iostatus_reset(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (s->target->blk) {
        blk_iostatus_reset(s->target->blk);
    }
}

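/* Release the frozen sync bitmap: on failure or cancellation, merge the
 * successor back so no dirty information is lost; on success, install the
 * successor and delete the old bitmap. */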
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = job->common.bs;

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .iostatus_reset = backup_iostatus_reset,
    .commit         = backup_commit,
    .abort          = backup_abort,
};

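/* Map a copy error to a block-error action, using on_source_error for read
 * failures and on_target_error for write failures. */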
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->common.bs,
                                      job->on_source_error, true, error);
    } else {
        return block_job_error_action(&job->common, job->target,
                                      job->on_target_error, false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

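/* Runs in the main loop (via block_job_defer_to_main_loop) to drop the
 * target reference and report the job's final return value. */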
static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

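/* Sleep (honouring the rate limit, if one is set) and report whether the
 * job has been cancelled. */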
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (Without this yield, the VM does not reboot.)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

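/* sync=incremental: walk the dirty bitmap and copy only the clusters that
 * contain dirty sectors, retrying each cluster until it succeeds or the
 * error action says to report. */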
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}

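/* Main job coroutine.  Installs the before-write notifier so guest writes
 * trigger copy-out first, then copies data according to the sync mode:
 * NONE only services CoW requests, INCREMENTAL follows the dirty bitmap,
 * and FULL/TOP scan every cluster (TOP skips unallocated ones). */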
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    BlockdevOnError on_target_error = job->on_target_error;
    NotifierWithReturn before_write = {
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    bdrv_set_enable_write_cache(target, true);
    if (target->blk) {
        blk_set_on_error(target->blk, on_target_error, on_target_error);
        blk_iostatus_enable(target->blk);
    }

    bdrv_add_before_write_notifier(bs, &before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    if (target->blk) {
        blk_iostatus_disable(target->blk);
    }
    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

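/* Create and start a backup job copying @bs to @target.
 *
 * A minimal caller sketch (hypothetical names, for illustration only;
 * real callers live in blockdev.c and set up bs/target themselves):
 *
 *     Error *local_err = NULL;
 *     backup_start(bs, target, 0, MIRROR_SYNC_MODE_FULL, NULL,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  my_backup_cb, my_opaque, NULL, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 *
 * Validation failures set *errp; paths that jump to the error label below
 * also reclaim a sync bitmap that was frozen for INCREMENTAL mode. */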
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    int ret;

    assert(bs);
    assert(target);
    assert(cb);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co, job);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
}