/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

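/*
 * Copy the clusters covering [sector_num, sector_num + nb_sectors) from
 * the source to the target through a bounce buffer, skipping clusters
 * already marked in done_bitmap.  Called both from the main copy loop
 * and, with is_write_notifier set, from the before-write notifier.
 */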
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start * job->cluster_size,
                            bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start * job->cluster_size,
                                 bounce_qiov.size, &bounce_qiov, 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress; guest I/O counts as progress too.  Note that
         * the offset field is an opaque progress value, not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

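/*
 * before_write notifier: runs before a guest write reaches the source,
 * so the data about to be overwritten is first copied to the target.
 * This is what keeps the backup point-in-time consistent.
 */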
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert(req->bs == blk_bs(job->common.blk));
    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .attached_aio_context   = backup_attached_aio_context,
};

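/* Map an I/O error to the action configured for source reads or target
 * writes, respectively. */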
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

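/* Completion callback, deferred to the main loop via
 * block_job_defer_to_main_loop(). */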
static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    blk_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

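/* Sleep to honour the rate limit (if any) and give cancellation and
 * drain a chance to run.  Returns true if the job has been cancelled. */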
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns;
     * without this, the VM does not reboot.
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

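/*
 * sync=incremental: walk the frozen dirty bitmap and copy only the
 * clusters it marks, advancing the progress meter over the skipped
 * (clean) ranges.
 */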
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}

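/*
 * Main job coroutine: set up in-flight request tracking and the
 * before-write notifier, then dispatch on the sync mode (NONE just
 * services CoW requests; INCREMENTAL follows the dirty bitmap; FULL
 * and TOP scan the whole device).
 */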
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    BlockBackend *target = job->target;
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed, but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(job, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    bdrv_op_unblock_all(blk_bs(target), job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

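/*
 * Entry point (reached via the 'drive-backup' and 'blockdev-backup' QMP
 * commands): validate the request, create the block job, pick a cluster
 * size based on the target's cluster size, and kick off backup_run().
 */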
void backup_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    job = block_job_create(job_id, &backup_job_driver, bs, speed,
                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->target = blk_new();
    blk_insert_bs(job->target, target);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run, job);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        blk_unref(job->target);
        block_job_unref(&job->common);
    }
}