4 * Copyright Red Hat, Inc. 2012
7 * Jeff Cody <jcody@redhat.com>
8 * Based on stream.c by Stefan Hajnoczi
10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
11 * See the COPYING.LIB file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qemu/cutils.h"
18 #include "block/block_int.h"
19 #include "block/blockjob_int.h"
20 #include "qapi/error.h"
21 #include "qemu/ratelimit.h"
22 #include "qemu/memalign.h"
23 #include "sysemu/block-backend.h"
27 * Size of data buffer for populating the image file. This should be large
28 * enough to process multiple clusters in a single call, so that populating
29 * contiguous regions of the image is efficient.
/* NOTE(review): the enclosing 'enum { ... };' wrapper around this constant
 * was dropped by the extraction (embedded numbering jumps 29 -> 31). */
31 COMMIT_BUFFER_SIZE
= 512 * 1024, /* in bytes */
/*
 * Per-job state of an active block-commit operation.
 * NOTE(review): this struct is incomplete in the extraction — fields used
 * later in the file (the BlockBackend pointers 'top' and 'base',
 * 'base_read_only', 'chain_frozen', and the embedded 'common' BlockJob)
 * are missing here, and the closing '} CommitBlockJob;' was dropped.
 */
34 typedef struct CommitBlockJob
{
/* Filter node inserted above 'top' by commit_start() */
36 BlockDriverState
*commit_top_bs
;
/* Node that the committed data is written into */
39 BlockDriverState
*base_bs
;
/* Result of bdrv_find_overlay(top, base) — first overlay above base */
40 BlockDriverState
*base_overlay
;
/* Error policy applied in commit_run() via block_job_error_action() */
41 BlockdevOnError on_error
;
/* Owned copy of the caller's backing-file string; freed in commit_clean() */
44 char *backing_file_str
;
/*
 * .prepare callback of commit_job_driver: runs in the main loop when the
 * job completes successfully, before .clean.  Unfreezes the backing chain
 * and drops the intermediate nodes between commit_top_bs and base_bs.
 */
47 static int commit_prepare(Job
*job
)
49 CommitBlockJob
*s
= container_of(job
, CommitBlockJob
, common
.job
);
51 bdrv_graph_rdlock_main_loop();
52 bdrv_unfreeze_backing_chain(s
->commit_top_bs
, s
->base_bs
);
53 s
->chain_frozen
= false;
54 bdrv_graph_rdunlock_main_loop();
56 /* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
57 * the normal backing chain can be restored. */
/* NOTE(review): the statements implementing the comment above were dropped
 * by the extraction (embedded numbering jumps 57 -> 61). */
61 /* FIXME: bdrv_drop_intermediate treats total failures and partial failures
62 * identically. Further work is needed to disambiguate these cases. */
/* NOTE(review): the continuation of this call (its final argument(s) and the
 * closing parenthesis/brace) was lost in extraction. */
63 return bdrv_drop_intermediate(s
->commit_top_bs
, s
->base_bs
,
/*
 * .abort callback of commit_job_driver: runs in the main loop when the job
 * fails or is cancelled.  Undoes commit_start()'s graph manipulation:
 * unfreezes the chain, drops the job's node blockers, and replaces
 * commit_top_bs with its backing node so the original chain is restored.
 * NOTE(review): several gaps in the embedded numbering (77-78, 80, 82-86,
 * 88, 90, 94, 101, 107, and after 108) indicate dropped statements/braces.
 */
67 static void commit_abort(Job
*job
)
69 CommitBlockJob
*s
= container_of(job
, CommitBlockJob
, common
.job
);
70 BlockDriverState
*top_bs
= blk_bs(s
->top
);
71 BlockDriverState
*commit_top_backing_bs
;
/* Chain may or may not be frozen depending on how far commit_start() got */
73 if (s
->chain_frozen
) {
74 bdrv_graph_rdlock_main_loop();
75 bdrv_unfreeze_backing_chain(s
->commit_top_bs
, s
->base_bs
);
76 bdrv_graph_rdunlock_main_loop();
79 /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
81 bdrv_ref(s
->commit_top_bs
);
87 /* free the blockers on the intermediate nodes so that bdrv_replace_nodes
89 block_job_remove_all_bdrv(&s
->common
);
91 /* If bdrv_drop_intermediate() failed (or was not invoked), remove the
92 * commit filter driver from the backing chain now. Do this as the final
93 * step so that the 'consistent read' permission can be granted.
95 * XXX Can (or should) we somehow keep 'consistent read' blocked even
96 * after the failed/cancelled commit job is gone? If we already wrote
97 * something to base, the intermediate images aren't valid any more. */
98 bdrv_graph_rdlock_main_loop();
99 commit_top_backing_bs
= s
->commit_top_bs
->backing
->bs
;
100 bdrv_graph_rdunlock_main_loop();
/* Take the writer lock under drain before rewriting the graph */
102 bdrv_drained_begin(commit_top_backing_bs
);
103 bdrv_graph_wrlock(commit_top_backing_bs
);
104 bdrv_replace_node(s
->commit_top_bs
, commit_top_backing_bs
, &error_abort
);
105 bdrv_graph_wrunlock(commit_top_backing_bs
);
106 bdrv_drained_end(commit_top_backing_bs
);
/* Drop the reference taken above (embedded line 81) */
108 bdrv_unref(s
->commit_top_bs
);
/*
 * .clean callback of commit_job_driver: runs in the main loop after
 * .prepare or .abort, regardless of outcome.  Restores the base node's
 * read-only flag and frees resources owned by the job.
 * NOTE(review): the extraction drops lines after embedded 123 — upstream
 * also releases the job's BlockBackend(s) here; verify against the tree.
 */
112 static void commit_clean(Job
*job
)
114 CommitBlockJob
*s
= container_of(job
, CommitBlockJob
, common
.job
);
116 /* restore base open flags here if appropriate (e.g., change the base back
117 * to r/o). These reopens do not need to be atomic, since we won't abort
118 * even on failure here */
119 if (s
->base_read_only
) {
120 bdrv_reopen_set_read_only(s
->base_bs
, true, NULL
);
123 g_free(s
->backing_file_str
);
/*
 * .run callback of commit_job_driver: the coroutine body of the job.
 * Copies every region of 'top' that is allocated above 'base' down into
 * 'base', in COMMIT_BUFFER_SIZE chunks, honouring the job's rate limit,
 * cancellation, and on_error policy.
 * NOTE(review): the extraction dropped the declarations of 'ret' and
 * 'offset', every negative-return check (e.g. after blk_co_getlength and
 * blk_co_truncate), the loop's error branches and the function's returns —
 * the embedded numbering is discontinuous throughout this block.
 */
127 static int coroutine_fn
commit_run(Job
*job
, Error
**errp
)
129 CommitBlockJob
*s
= container_of(job
, CommitBlockJob
, common
.job
);
132 int64_t n
= 0; /* bytes */
/* QEMU_AUTO_VFREE: buffer is qemu_vfree'd automatically on scope exit */
133 QEMU_AUTO_VFREE
void *buf
= NULL
;
134 int64_t len
, base_len
;
136 len
= blk_co_getlength(s
->top
);
/* Total work for progress reporting is the length of the top image */
140 job_progress_set_remaining(&s
->common
.job
, len
);
142 base_len
= blk_co_getlength(s
->base
);
/* Grow base if it is shorter than top, so all data fits */
147 if (base_len
< len
) {
148 ret
= blk_co_truncate(s
->base
, len
, false, PREALLOC_MODE_OFF
, 0, NULL
);
154 buf
= blk_blockalign(s
->top
, COMMIT_BUFFER_SIZE
);
/* Main copy loop: advance by the number of bytes handled each iteration */
156 for (offset
= 0; offset
< len
; offset
+= n
) {
158 bool error_in_source
= true;
160 /* Note that even when no rate limit is applied we need to yield
161 * with no pending I/O here so that bdrv_drain_all() returns.
163 block_job_ratelimit_sleep(&s
->common
);
164 if (job_is_cancelled(&s
->common
.job
)) {
167 /* Copy if allocated above the base */
168 ret
= blk_co_is_allocated_above(s
->top
, s
->base_overlay
, true,
169 offset
, COMMIT_BUFFER_SIZE
, &n
);
171 trace_commit_one_iteration(s
, offset
, n
, ret
);
173 assert(n
< SIZE_MAX
);
175 ret
= blk_co_pread(s
->top
, offset
, n
, buf
, 0);
/* Writes past this point happened to base, not to the source */
177 ret
= blk_co_pwrite(s
->base
, offset
, n
, buf
, 0);
179 error_in_source
= false;
/* On error, ask the job framework what to do (report/ignore/stop) */
184 BlockErrorAction action
=
185 block_job_error_action(&s
->common
, s
->on_error
,
186 error_in_source
, -ret
);
187 if (action
== BLOCK_ERROR_ACTION_REPORT
) {
194 /* Publish progress */
195 job_progress_update(&s
->common
.job
, n
);
/* Account the copied bytes against the rate limit */
198 block_job_ratelimit_processed_bytes(&s
->common
, n
);
/*
 * Driver table binding the commit job callbacks defined above.
 * NOTE(review): the extraction dropped the '.run = commit_run,' initializer
 * (embedded line numbering jumps 210 -> 212) and the closing '};'.
 */
205 static const BlockJobDriver commit_job_driver
= {
207 .instance_size
= sizeof(CommitBlockJob
),
208 .job_type
= JOB_TYPE_COMMIT
,
209 .free
= block_job_free
,
210 .user_resume
= block_job_user_resume
,
212 .prepare
= commit_prepare
,
213 .abort
= commit_abort
,
214 .clean
= commit_clean
/*
 * Read handler of the commit_top filter node: pure pass-through to the
 * backing child.  GRAPH_RDLOCK: caller must hold the graph reader lock.
 * NOTE(review): the function's braces were dropped by the extraction.
 */
218 static int coroutine_fn GRAPH_RDLOCK
219 bdrv_commit_top_preadv(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
220 QEMUIOVector
*qiov
, BdrvRequestFlags flags
)
222 return bdrv_co_preadv(bs
->backing
, offset
, bytes
, qiov
, flags
);
/*
 * Filename handler of the commit_top filter: report the backing node's
 * filename as our own, since the filter is transparent to users.
 * NOTE(review): the function's braces were dropped by the extraction.
 */
225 static GRAPH_RDLOCK
void bdrv_commit_top_refresh_filename(BlockDriverState
*bs
)
227 pstrcpy(bs
->exact_filename
, sizeof(bs
->exact_filename
),
228 bs
->backing
->bs
->filename
);
/*
 * Permission handler of the commit_top filter: share every permission on
 * the child so the commit job itself can keep writing below the filter.
 * NOTE(review): the extraction dropped one parameter line (embedded 232,
 * presumably the BdrvChildRole) and the '*nperm' assignment that upstream
 * pairs with '*nshared' — verify against the tree.
 */
231 static void bdrv_commit_top_child_perm(BlockDriverState
*bs
, BdrvChild
*c
,
233 BlockReopenQueue
*reopen_queue
,
234 uint64_t perm
, uint64_t shared
,
235 uint64_t *nperm
, uint64_t *nshared
)
238 *nshared
= BLK_PERM_ALL
;
241 /* Dummy node that provides consistent read to its users without requiring it
242 * from its backing file and that allows writes on the backing file chain. */
/* NOTE(review): initializers between embedded lines 247 and 250, and the
 * closing '};', were dropped by the extraction. */
243 static BlockDriver bdrv_commit_top
= {
244 .format_name
= "commit_top",
245 .bdrv_co_preadv
= bdrv_commit_top_preadv
,
246 .bdrv_refresh_filename
= bdrv_commit_top_refresh_filename
,
247 .bdrv_child_perm
= bdrv_commit_top_child_perm
,
/* The backing child is the filtered child for this filter driver */
250 .filtered_child_is_backing
= true,
/*
 * Create and start a background commit job that merges the data of 'top'
 * into 'base' and removes the intermediate nodes from the backing chain of
 * 'bs'.  Reports failures through 'errp'.
 * NOTE(review): this block is heavily truncated by the extraction — the
 * declarations of 's' and 'ret', every 'goto fail' / early-return error
 * branch, the 'fail:' cleanup label's introduction, and many closing braces
 * are missing (the embedded upstream numbering is discontinuous throughout).
 * The trailing statements from embedded line 418 onward are the error/
 * rollback path, not straight-line code.
 */
253 void commit_start(const char *job_id
, BlockDriverState
*bs
,
254 BlockDriverState
*base
, BlockDriverState
*top
,
255 int creation_flags
, int64_t speed
,
256 BlockdevOnError on_error
, const char *backing_file_str
,
257 const char *filter_node_name
, Error
**errp
)
260 BlockDriverState
*iter
;
261 BlockDriverState
*commit_top_bs
= NULL
;
262 BlockDriverState
*filtered_base
;
263 int64_t base_size
, top_size
;
264 uint64_t base_perms
, iter_shared_perms
;
/* Reject a no-op: top and base must be distinct after filter skipping */
270 bdrv_graph_rdlock_main_loop();
271 if (bdrv_skip_filters(top
) == bdrv_skip_filters(base
)) {
272 error_setg(errp
, "Invalid files for merge: top and base are the same");
273 bdrv_graph_rdunlock_main_loop();
276 bdrv_graph_rdunlock_main_loop();
278 base_size
= bdrv_getlength(base
);
280 error_setg_errno(errp
, -base_size
, "Could not inquire base image size");
284 top_size
= bdrv_getlength(top
);
286 error_setg_errno(errp
, -top_size
, "Could not inquire top image size");
/* RESIZE is only needed when base must grow to hold top's data */
290 base_perms
= BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE
;
291 if (base_size
< top_size
) {
292 base_perms
|= BLK_PERM_RESIZE
;
295 s
= block_job_create(job_id
, &commit_job_driver
, NULL
, bs
, 0, BLK_PERM_ALL
,
296 speed
, creation_flags
, NULL
, NULL
, errp
);
301 /* convert base to r/w, if necessary */
302 s
->base_read_only
= bdrv_is_read_only(base
);
303 if (s
->base_read_only
) {
304 if (bdrv_reopen_set_read_only(base
, false, errp
) != 0) {
309 /* Insert commit_top block node above top, so we can block consistent read
310 * on the backing chain below it */
311 commit_top_bs
= bdrv_new_open_driver(&bdrv_commit_top
, filter_node_name
, 0,
313 if (commit_top_bs
== NULL
) {
/* An auto-generated filter node is hidden from the user */
316 if (!filter_node_name
) {
317 commit_top_bs
->implicit
= true;
320 /* So that we can always drop this node */
321 commit_top_bs
->never_freeze
= true;
323 commit_top_bs
->total_sectors
= top
->total_sectors
;
325 ret
= bdrv_append(commit_top_bs
, top
, errp
);
326 bdrv_unref(commit_top_bs
); /* referenced by new parents or failed */
328 commit_top_bs
= NULL
;
332 s
->commit_top_bs
= commit_top_bs
;
335 * Block all nodes between top and base, because they will
336 * disappear from the chain after this operation.
337 * Note that this assumes that the user is fine with removing all
338 * nodes (including R/W filters) between top and base. Assuring
339 * this is the responsibility of the interface (i.e. whoever calls
342 bdrv_graph_wrlock(top
);
343 s
->base_overlay
= bdrv_find_overlay(top
, base
);
344 assert(s
->base_overlay
);
347 * The topmost node with
348 * bdrv_skip_filters(filtered_base) == bdrv_skip_filters(base)
350 filtered_base
= bdrv_cow_bs(s
->base_overlay
);
351 assert(bdrv_skip_filters(filtered_base
) == bdrv_skip_filters(base
));
354 * XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
355 * at s->base (if writes are blocked for a node, they are also blocked
356 * for its backing file). The other options would be a second filter
357 * driver above s->base.
359 iter_shared_perms
= BLK_PERM_WRITE_UNCHANGED
| BLK_PERM_WRITE
;
/* Walk the chain from top down to base, blocking each intermediate node */
361 for (iter
= top
; iter
!= base
; iter
= bdrv_filter_or_cow_bs(iter
)) {
362 if (iter
== filtered_base
) {
364 * From here on, all nodes are filters on the base. This
365 * allows us to share BLK_PERM_CONSISTENT_READ.
367 iter_shared_perms
|= BLK_PERM_CONSISTENT_READ
;
370 ret
= block_job_add_bdrv(&s
->common
, "intermediate node", iter
, 0,
371 iter_shared_perms
, errp
);
373 bdrv_graph_wrunlock(top
);
/* Freeze the chain so nobody rewrites it while the job runs */
378 if (bdrv_freeze_backing_chain(commit_top_bs
, base
, errp
) < 0) {
379 bdrv_graph_wrunlock(top
);
382 s
->chain_frozen
= true;
384 ret
= block_job_add_bdrv(&s
->common
, "base", base
, 0, BLK_PERM_ALL
, errp
);
385 bdrv_graph_wrunlock(top
);
391 s
->base
= blk_new(s
->common
.job
.aio_context
,
393 BLK_PERM_CONSISTENT_READ
394 | BLK_PERM_WRITE_UNCHANGED
);
395 ret
= blk_insert_bs(s
->base
, base
, errp
);
399 blk_set_disable_request_queuing(s
->base
, true);
402 /* Required permissions are already taken with block_job_add_bdrv() */
403 s
->top
= blk_new(s
->common
.job
.aio_context
, 0, BLK_PERM_ALL
);
404 ret
= blk_insert_bs(s
->top
, top
, errp
);
408 blk_set_disable_request_queuing(s
->top
, true);
410 s
->backing_file_str
= g_strdup(backing_file_str
);
411 s
->on_error
= on_error
;
413 trace_commit_start(bs
, base
, top
, s
);
414 job_start(&s
->common
.job
);
/* --- error/rollback path (upstream 'fail:' label sits before here) --- */
418 if (s
->chain_frozen
) {
419 bdrv_graph_rdlock_main_loop();
420 bdrv_unfreeze_backing_chain(commit_top_bs
, base
);
421 bdrv_graph_rdunlock_main_loop();
429 if (s
->base_read_only
) {
430 bdrv_reopen_set_read_only(base
, true, NULL
);
432 job_early_fail(&s
->common
.job
);
433 /* commit_top_bs has to be replaced after deleting the block job,
434 * otherwise this would fail because of lack of permissions. */
436 bdrv_drained_begin(top
);
437 bdrv_graph_wrlock(top
);
438 bdrv_replace_node(commit_top_bs
, top
, &error_abort
);
439 bdrv_graph_wrunlock(top
);
440 bdrv_drained_end(top
);
445 #define COMMIT_BUF_SIZE (2048 * BDRV_SECTOR_SIZE)
447 /* commit COW file into the raw image */
/*
 * Synchronous variant of commit: copy all clusters allocated in 'bs' into
 * its immediate backing file, then empty 'bs'.  Returns 0 or -errno.
 * NOTE(review): heavily truncated by the extraction — the declarations of
 * 'ro', 'ctx', 'n' and 'ret', every negative-return check, the cleanup
 * labels (upstream 'ro_cleanup:') and many closing braces are missing; the
 * trailing statements from embedded line 584 onward are cleanup code.
 */
448 int bdrv_commit(BlockDriverState
*bs
)
450 BlockBackend
*src
, *backing
;
451 BlockDriverState
*backing_file_bs
= NULL
;
452 BlockDriverState
*commit_top_bs
= NULL
;
453 BlockDriver
*drv
= bs
->drv
;
455 int64_t offset
, length
, backing_length
;
/* QEMU_AUTO_VFREE: buffer is qemu_vfree'd automatically on scope exit */
459 QEMU_AUTO_VFREE
uint8_t *buf
= NULL
;
460 Error
*local_err
= NULL
;
/* Holds the graph reader lock for the whole function (main loop only) */
463 GRAPH_RDLOCK_GUARD_MAINLOOP();
468 backing_file_bs
= bdrv_cow_bs(bs
);
/* Nothing to commit without a backing file */
470 if (!backing_file_bs
) {
474 if (bdrv_op_is_blocked(bs
, BLOCK_OP_TYPE_COMMIT_SOURCE
, NULL
) ||
475 bdrv_op_is_blocked(backing_file_bs
, BLOCK_OP_TYPE_COMMIT_TARGET
, NULL
))
/* Remember read-only state so it can be restored during cleanup */
480 ro
= bdrv_is_read_only(backing_file_bs
);
483 if (bdrv_reopen_set_read_only(backing_file_bs
, false, NULL
)) {
488 ctx
= bdrv_get_aio_context(bs
);
489 /* WRITE_UNCHANGED is required for bdrv_make_empty() */
490 src
= blk_new(ctx
, BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE_UNCHANGED
,
492 backing
= blk_new(ctx
, BLK_PERM_WRITE
| BLK_PERM_RESIZE
, BLK_PERM_ALL
);
494 ret
= blk_insert_bs(src
, bs
, &local_err
);
496 error_report_err(local_err
);
500 /* Insert commit_top block node above backing, so we can write to it */
501 commit_top_bs
= bdrv_new_open_driver(&bdrv_commit_top
, NULL
, BDRV_O_RDWR
,
503 if (commit_top_bs
== NULL
) {
504 error_report_err(local_err
);
508 bdrv_set_backing_hd(commit_top_bs
, backing_file_bs
, &error_abort
);
509 bdrv_set_backing_hd(bs
, commit_top_bs
, &error_abort
);
511 ret
= blk_insert_bs(backing
, backing_file_bs
, &local_err
);
513 error_report_err(local_err
);
517 length
= blk_getlength(src
);
523 backing_length
= blk_getlength(backing
);
524 if (backing_length
< 0) {
525 ret
= backing_length
;
529 /* If our top snapshot is larger than the backing file image,
530 * grow the backing file image if possible. If not possible,
531 * we must return an error */
532 if (length
> backing_length
) {
533 ret
= blk_truncate(backing
, length
, false, PREALLOC_MODE_OFF
, 0,
536 error_report_err(local_err
);
541 /* blk_try_blockalign() for src will choose an alignment that works for
542 * backing as well, so no need to compare the alignment manually. */
543 buf
= blk_try_blockalign(src
, COMMIT_BUF_SIZE
);
/* Copy loop: move every allocated chunk of the overlay into the backing */
549 for (offset
= 0; offset
< length
; offset
+= n
) {
550 ret
= bdrv_is_allocated(bs
, offset
, COMMIT_BUF_SIZE
, &n
);
555 ret
= blk_pread(src
, offset
, n
, buf
, 0);
560 ret
= blk_pwrite(backing
, offset
, n
, buf
, 0);
/* Discard the now-redundant data in the overlay */
567 ret
= blk_make_empty(src
, NULL
);
568 /* Ignore -ENOTSUP */
569 if (ret
< 0 && ret
!= -ENOTSUP
) {
576 * Make sure all data we wrote to the backing device is actually
/* --- cleanup path: undo the temporary commit_top insertion --- */
584 if (bdrv_cow_bs(bs
) != backing_file_bs
) {
585 bdrv_set_backing_hd(bs
, backing_file_bs
, &error_abort
);
587 bdrv_unref(commit_top_bs
);
591 /* ignoring error return here */
592 bdrv_reopen_set_read_only(backing_file_bs
, true, NULL
);