/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->role->drained_end) {
        c->role->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->role->drained_poll) {
        return c->role->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->role->drained_begin) {
        c->role->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
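
/*
 * Note the asymmetry above: the alignments merge with MAX() because the
 * stricter requirement of parent and child must win, while max_transfer
 * and max_iov merge with MIN_NON_ZERO(), which treats 0 as "unlimited".
 * For example, merging a parent with max_transfer == 0 into a child with
 * max_transfer == 65536 yields 65536, not 0.
 */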

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
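
/*
 * Illustrative usage (a sketch, not a call site in this file): a user that
 * wants copy-on-read semantics for the duration of some work brackets it
 * like this:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue reads that populate the top layer as a side effect ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Because the flag is a counter, two such users can overlap safely.
 */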

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    atomic_mb_set(&data->done, true);
    if (!data->begin) {
        atomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        atomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
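
/*
 * Illustrative usage (a sketch, not a call site in this file): callers that
 * need exclusive access while modifying a node bracket the change in a
 * drained section:
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure bs while no new requests can be submitted ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is simply the degenerate case of an empty section.
 */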

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * I/O requests to finish may take an infinite amount of time.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * I/O requests to finish may be endless.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /* aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
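
/*
 * The two early returns above implement a half-open interval overlap test.
 * For example, a request with overlap_offset == 0 and overlap_bytes == 4096
 * does not overlap [4096, 8192), but does overlap [4095, 8191): intervals
 * that merely touch do not count as overlapping.
 */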

static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);
    return waited;
}

bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
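
/*
 * For example, with bdi.cluster_size == 65536, a 4096-byte request at
 * offset 71680 (64 KiB + 6 KiB) is rounded to *cluster_offset == 65536 and
 * *cluster_bytes == 65536, i.e. the one cluster that fully contains it.
 */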

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
    aio_wait_kick();
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not read back as zeroes.
 * Optional flags are passed through to bdrv_pwrite_zeroes (e.g.
 * BDRV_REQ_MAY_UNMAP, BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
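
/*
 * Illustrative usage (a sketch, not a call site in this file): a format
 * driver updating an on-disk header field that later metadata writes
 * depend on could use the barrier semantics like this:
 *
 *     ret = bdrv_pwrite_sync(child, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret; (the header may not have reached the disk)
 *     }
 *     (only now issue writes that assume the new header is persistent)
 */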

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover the entire cluster so no additional backing file I/O is required
     * when allocating a cluster in the image file. Note that this value may
     * exceed BDRV_REQUEST_MAX_BYTES (even when the original read did not),
     * which is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    pnum - skip_bytes);
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov, bytes - bytes_remaining, 0);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
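
/*
 * The max_bytes logic above also implements reads beyond EOF: for a 1 MiB
 * image, an aligned 8192-byte read at offset 1044480 (1 MiB - 4 KiB) has
 * max_bytes == 4096, so one 4096-byte driver read is issued and the
 * remaining 4096 bytes are zero-filled with qemu_iovec_memset() once
 * max_bytes reaches 0.
 */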

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)    ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings.
 * @head is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
 * chunk around the tail, if the tail exists.
 *
 * @merge_reads is true for small requests, i.e. when
 * @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    uint64_t align = bs->bl.request_alignment;
    size_t sum;

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if ((!pad->head && !pad->tail) || !bytes) {
        return false;
    }

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
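
/*
 * Worked example: with request_alignment == 512, a 1024-byte request at
 * offset 700 gets pad->head == 188 (700 % 512) and pad->tail == 324
 * (512 - 1724 % 512). sum == 1536 exceeds align with both head and tail
 * present, so buf_len == 1024 and the padded request covers [512, 2048).
 */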

static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        uint64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
}

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * All parameters except @bs are in-out: they represent the original request at
 * the function call and the padded request (if padding is needed) when the
 * function finishes.
 *
 * Function always succeeds.
 */
static bool bdrv_pad_request(BlockDriverState *bs,
                             QEMUIOVector **qiov, size_t *qiov_offset,
                             int64_t *offset, unsigned int *bytes,
                             BdrvRequestPadding *pad)
{
    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        return false;
    }

    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                             *qiov, *qiov_offset, *bytes,
                             pad->buf + pad->buf_len - pad->tail, pad->tail);
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;

    return true;
}
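
/*
 * Continuing the example at bdrv_init_padding(): a 1024-byte request at
 * offset 700 leaves this function as a 1536-byte request at offset 512,
 * with *qiov pointing at pad->local_qiov, which chains the head padding,
 * the caller's original buffer and the tail padding.
 */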

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv(bs, offset, bytes, flags);

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before a write operation */
    if (atomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
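
/*
 * For example, zeroing [700, 1724) with alignment == 512 takes three
 * iterations of the loop above: a 324-byte head request up to offset 1024,
 * a 512-byte aligned bulk request, and a 188-byte tail request. Only the
 * bulk part may use the full max_write_zeroes limit.
 */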
1840
1841 static inline int coroutine_fn
1842 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1843 BdrvTrackedRequest *req, int flags)
1844 {
1845 BlockDriverState *bs = child->bs;
1846 bool waited;
1847 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1848
1849 if (bs->read_only) {
1850 return -EPERM;
1851 }
1852
1853 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1854 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1855 assert(!(flags & ~BDRV_REQ_MASK));
1856
1857 if (flags & BDRV_REQ_SERIALISING) {
1858 waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
1859 /*
1860 * For a misaligned request we should have already waited earlier,
1861 * because we come after bdrv_padding_rmw_read which must be called
1862 * with the request already marked as serialising.
1863 */
1864 assert(!waited ||
1865 (req->offset == req->overlap_offset &&
1866 req->bytes == req->overlap_bytes));
1867 } else {
1868 bdrv_wait_serialising_requests(req);
1869 }
1870
1871 assert(req->overlap_offset <= offset);
1872 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1873 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1874
1875 switch (req->type) {
1876 case BDRV_TRACKED_WRITE:
1877 case BDRV_TRACKED_DISCARD:
1878 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1879 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1880 } else {
1881 assert(child->perm & BLK_PERM_WRITE);
1882 }
1883 return notifier_with_return_list_notify(&bs->before_write_notifiers,
1884 req);
1885 case BDRV_TRACKED_TRUNCATE:
1886 assert(child->perm & BLK_PERM_RESIZE);
1887 return 0;
1888 default:
1889 abort();
1890 }
1891 }
1892
1893 static inline void coroutine_fn
1894 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1895 BdrvTrackedRequest *req, int ret)
1896 {
1897 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1898 BlockDriverState *bs = child->bs;
1899
1900 atomic_inc(&bs->write_gen);
1901
1902 /*
1903 * Discard cannot extend the image, but in error handling cases, such as
1904 * when reverting a qcow2 cluster allocation, the discarded range can pass
1905 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
1906 * here. Instead, just skip it, since semantically a discard request
1907 * beyond EOF cannot expand the image anyway.
1908 */
1909 if (ret == 0 &&
1910 (req->type == BDRV_TRACKED_TRUNCATE ||
1911 end_sector > bs->total_sectors) &&
1912 req->type != BDRV_TRACKED_DISCARD) {
1913 bs->total_sectors = end_sector;
1914 bdrv_parent_cb_resize(bs);
1915 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1916 }
1917 if (req->bytes) {
1918 switch (req->type) {
1919 case BDRV_TRACKED_WRITE:
1920 stat64_max(&bs->wr_highest_offset, offset + bytes);
1921 /* fall through, to set dirty bits */
1922 case BDRV_TRACKED_DISCARD:
1923 bdrv_set_dirty(bs, offset, bytes);
1924 break;
1925 default:
1926 break;
1927 }
1928 }
1929 }
1930
1931 /*
1932 * Forwards an already correctly aligned write request to the BlockDriver,
1933 * after possibly fragmenting it.
1934 */
1935 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1936 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1937 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
1938 {
1939 BlockDriverState *bs = child->bs;
1940 BlockDriver *drv = bs->drv;
1941 int ret;
1942
1943 uint64_t bytes_remaining = bytes;
1944 int max_transfer;
1945
1946 if (!drv) {
1947 return -ENOMEDIUM;
1948 }
1949
1950 if (bdrv_has_readonly_bitmaps(bs)) {
1951 return -EPERM;
1952 }
1953
1954 assert(is_power_of_2(align));
1955 assert((offset & (align - 1)) == 0);
1956 assert((bytes & (align - 1)) == 0);
1957 assert(!qiov || qiov_offset + bytes <= qiov->size);
1958 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1959 align);
1960
1961 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1962
1963 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1964 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1965 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
1966 flags |= BDRV_REQ_ZERO_WRITE;
1967 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1968 flags |= BDRV_REQ_MAY_UNMAP;
1969 }
1970 }
1971
1972 if (ret < 0) {
1973 /* Do nothing; the write notifier decided to fail this request */
1974 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1975 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1976 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1977 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1978 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
1979 qiov, qiov_offset);
1980 } else if (bytes <= max_transfer) {
1981 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1982 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
1983 } else {
1984 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1985 while (bytes_remaining) {
1986 int num = MIN(bytes_remaining, max_transfer);
1987 int local_flags = flags;
1988
1989 assert(num);
1990 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1991 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1992 /* If FUA is going to be emulated by flush, we only
1993 * need to flush on the last iteration */
1994 local_flags &= ~BDRV_REQ_FUA;
1995 }
1996
1997 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1998 num, qiov, bytes - bytes_remaining,
1999 local_flags);
2000 if (ret < 0) {
2001 break;
2002 }
2003 bytes_remaining -= num;
2004 }
2005 }
2006 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
2007
2008 if (ret >= 0) {
2009 ret = 0;
2010 }
2011 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2012
2013 return ret;
2014 }
2015
2016 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
2017 int64_t offset,
2018 unsigned int bytes,
2019 BdrvRequestFlags flags,
2020 BdrvTrackedRequest *req)
2021 {
2022 BlockDriverState *bs = child->bs;
2023 QEMUIOVector local_qiov;
2024 uint64_t align = bs->bl.request_alignment;
2025 int ret = 0;
2026 bool padding;
2027 BdrvRequestPadding pad;
2028
2029 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2030 if (padding) {
2031 bdrv_mark_request_serialising(req, align);
2032
2033 bdrv_padding_rmw_read(child, req, &pad, true);
2034
2035 if (pad.head || pad.merge_reads) {
2036 int64_t aligned_offset = offset & ~(align - 1);
2037 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2038
2039 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2040 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2041 align, &local_qiov, 0,
2042 flags & ~BDRV_REQ_ZERO_WRITE);
2043 if (ret < 0 || pad.merge_reads) {
2044 /* Error or all work is done */
2045 goto out;
2046 }
2047 offset += write_bytes - pad.head;
2048 bytes -= write_bytes - pad.head;
2049 }
2050 }
2051
2052 assert(!bytes || (offset & (align - 1)) == 0);
2053 if (bytes >= align) {
2054 /* Write the aligned part in the middle. */
2055 uint64_t aligned_bytes = bytes & ~(align - 1);
2056 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2057 NULL, 0, flags);
2058 if (ret < 0) {
2059 goto out;
2060 }
2061 bytes -= aligned_bytes;
2062 offset += aligned_bytes;
2063 }
2064
2065 assert(!bytes || (offset & (align - 1)) == 0);
2066 if (bytes) {
2067 assert(align == pad.tail + bytes);
2068
2069 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2070 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2071 &local_qiov, 0,
2072 flags & ~BDRV_REQ_ZERO_WRITE);
2073 }
2074
2075 out:
2076 bdrv_padding_destroy(&pad);
2077
2078 return ret;
2079 }
2080
2081 /*
2082 * Handle a write request in coroutine context
2083 */
2084 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2085 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
2086 BdrvRequestFlags flags)
2087 {
2088 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2089 }
2090
2091 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2092 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
2093 BdrvRequestFlags flags)
2094 {
2095 BlockDriverState *bs = child->bs;
2096 BdrvTrackedRequest req;
2097 uint64_t align = bs->bl.request_alignment;
2098 BdrvRequestPadding pad;
2099 int ret;
2100
2101 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
2102
2103 if (!bs->drv) {
2104 return -ENOMEDIUM;
2105 }
2106
2107 ret = bdrv_check_byte_request(bs, offset, bytes);
2108 if (ret < 0) {
2109 return ret;
2110 }
2111
2112 /* If the request is misaligned then we can't make it efficient */
2113 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2114 !QEMU_IS_ALIGNED(offset | bytes, align))
2115 {
2116 return -ENOTSUP;
2117 }
2118
2119 bdrv_inc_in_flight(bs);
2120 /*
2121 * Align write if necessary by performing a read-modify-write cycle.
2122 * Pad qiov with the read parts and be sure to have a tracked request not
2123 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
2124 */
2125 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2126
2127 if (flags & BDRV_REQ_ZERO_WRITE) {
2128 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2129 goto out;
2130 }
2131
2132 if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
2133 bdrv_mark_request_serialising(&req, align);
2134 bdrv_padding_rmw_read(child, &req, &pad, false);
2135 }
2136
2137 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2138 qiov, qiov_offset, flags);
2139
2140 bdrv_padding_destroy(&pad);
2141
2142 out:
2143 tracked_request_end(&req);
2144 bdrv_dec_in_flight(bs);
2145
2146 return ret;
2147 }
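
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * a coroutine caller wraps its buffer in a QEMUIOVector and lets
 * bdrv_co_pwritev() take care of alignment through the RMW machinery
 * above. Assumes 'child' grants BLK_PERM_WRITE.
 */
static int coroutine_fn example_write_buf(BdrvChild *child, int64_t offset,
                                          void *data, size_t len)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, data, len);

    return bdrv_co_pwritev(child, offset, len, &qiov, 0);
}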
2148
2149 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2150 int bytes, BdrvRequestFlags flags)
2151 {
2152 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2153
2154 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2155 flags &= ~BDRV_REQ_MAY_UNMAP;
2156 }
2157
2158 return bdrv_co_pwritev(child, offset, bytes, NULL,
2159 BDRV_REQ_ZERO_WRITE | flags);
2160 }
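
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * zeroing a region from coroutine context. BDRV_REQ_MAY_UNMAP lets the
 * driver deallocate rather than write literal zeroes; as shown above,
 * the flag is silently dropped unless the node was opened with
 * BDRV_O_UNMAP.
 */
static int coroutine_fn example_zero_first_mb(BdrvChild *child)
{
    return bdrv_co_pwrite_zeroes(child, 0, 1024 * 1024, BDRV_REQ_MAY_UNMAP);
}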
2161
2162 /*
2163 * Flush ALL BDSes, regardless of whether they are reachable via a BlockBackend.
2164 */
2165 int bdrv_flush_all(void)
2166 {
2167 BdrvNextIterator it;
2168 BlockDriverState *bs = NULL;
2169 int result = 0;
2170
2171 /*
2172 * The bdrv queue is managed by record/replay; creating a new
2173 * flush request while stopping the VM may break the
2174 * determinism of replay.
2175 */
2176 if (replay_events_enabled()) {
2177 return result;
2178 }
2179
2180 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2181 AioContext *aio_context = bdrv_get_aio_context(bs);
2182 int ret;
2183
2184 aio_context_acquire(aio_context);
2185 ret = bdrv_flush(bs);
2186 if (ret < 0 && !result) {
2187 result = ret;
2188 }
2189 aio_context_release(aio_context);
2190 }
2191
2192 return result;
2193 }
2194
2195
2196 typedef struct BdrvCoBlockStatusData {
2197 BlockDriverState *bs;
2198 BlockDriverState *base;
2199 bool want_zero;
2200 int64_t offset;
2201 int64_t bytes;
2202 int64_t *pnum;
2203 int64_t *map;
2204 BlockDriverState **file;
2205 int ret;
2206 bool done;
2207 } BdrvCoBlockStatusData;
2208
2209 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
2210 bool want_zero,
2211 int64_t offset,
2212 int64_t bytes,
2213 int64_t *pnum,
2214 int64_t *map,
2215 BlockDriverState **file)
2216 {
2217 assert(bs->file && bs->file->bs);
2218 *pnum = bytes;
2219 *map = offset;
2220 *file = bs->file->bs;
2221 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2222 }
2223
2224 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2225 bool want_zero,
2226 int64_t offset,
2227 int64_t bytes,
2228 int64_t *pnum,
2229 int64_t *map,
2230 BlockDriverState **file)
2231 {
2232 assert(bs->backing && bs->backing->bs);
2233 *pnum = bytes;
2234 *map = offset;
2235 *file = bs->backing->bs;
2236 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2237 }
2238
2239 /*
2240 * Returns the allocation status of the specified byte range.
2241 * Drivers not implementing the functionality are assumed to not support
2242 * backing files, hence the entire range is reported as allocated.
2243 *
2244 * If 'want_zero' is true, the caller is querying for mapping
2245 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2246 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2247 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2248 *
2249 * If 'offset' is beyond the end of the disk image the return value is
2250 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2251 *
2252 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2253 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2254 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2255 *
2256 * 'pnum' is set to the number of bytes (including and immediately
2257 * following the specified offset) that are easily known to be in the
2258 * same allocated/unallocated state. Note that a second call starting
2259 * at the original offset plus returned pnum may have the same status.
2260 * The returned value is non-zero on success except at end-of-file.
2261 *
2262 * Returns negative errno on failure. Otherwise, if the
2263 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2264 * set to the host mapping and BDS corresponding to the guest offset.
2265 */
2266 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2267 bool want_zero,
2268 int64_t offset, int64_t bytes,
2269 int64_t *pnum, int64_t *map,
2270 BlockDriverState **file)
2271 {
2272 int64_t total_size;
2273 int64_t n; /* bytes */
2274 int ret;
2275 int64_t local_map = 0;
2276 BlockDriverState *local_file = NULL;
2277 int64_t aligned_offset, aligned_bytes;
2278 uint32_t align;
2279
2280 assert(pnum);
2281 *pnum = 0;
2282 total_size = bdrv_getlength(bs);
2283 if (total_size < 0) {
2284 ret = total_size;
2285 goto early_out;
2286 }
2287
2288 if (offset >= total_size) {
2289 ret = BDRV_BLOCK_EOF;
2290 goto early_out;
2291 }
2292 if (!bytes) {
2293 ret = 0;
2294 goto early_out;
2295 }
2296
2297 n = total_size - offset;
2298 if (n < bytes) {
2299 bytes = n;
2300 }
2301
2302 /* Must be non-NULL or bdrv_getlength() would have failed */
2303 assert(bs->drv);
2304 if (!bs->drv->bdrv_co_block_status) {
2305 *pnum = bytes;
2306 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2307 if (offset + bytes == total_size) {
2308 ret |= BDRV_BLOCK_EOF;
2309 }
2310 if (bs->drv->protocol_name) {
2311 ret |= BDRV_BLOCK_OFFSET_VALID;
2312 local_map = offset;
2313 local_file = bs;
2314 }
2315 goto early_out;
2316 }
2317
2318 bdrv_inc_in_flight(bs);
2319
2320 /* Round out to request_alignment boundaries */
2321 align = bs->bl.request_alignment;
2322 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2323 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2324
2325 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2326 aligned_bytes, pnum, &local_map,
2327 &local_file);
2328 if (ret < 0) {
2329 *pnum = 0;
2330 goto out;
2331 }
2332
2333 /*
2334 * The driver's result must be a non-zero multiple of request_alignment.
2335 * Clamp pnum and adjust map to original request.
2336 */
2337 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2338 align > offset - aligned_offset);
2339 if (ret & BDRV_BLOCK_RECURSE) {
2340 assert(ret & BDRV_BLOCK_DATA);
2341 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2342 assert(!(ret & BDRV_BLOCK_ZERO));
2343 }
2344
2345 *pnum -= offset - aligned_offset;
2346 if (*pnum > bytes) {
2347 *pnum = bytes;
2348 }
2349 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2350 local_map += offset - aligned_offset;
2351 }
2352
2353 if (ret & BDRV_BLOCK_RAW) {
2354 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2355 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2356 *pnum, pnum, &local_map, &local_file);
2357 goto out;
2358 }
2359
2360 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2361 ret |= BDRV_BLOCK_ALLOCATED;
2362 } else if (want_zero) {
2363 if (bdrv_unallocated_blocks_are_zero(bs)) {
2364 ret |= BDRV_BLOCK_ZERO;
2365 } else if (bs->backing) {
2366 BlockDriverState *bs2 = bs->backing->bs;
2367 int64_t size2 = bdrv_getlength(bs2);
2368
2369 if (size2 >= 0 && offset >= size2) {
2370 ret |= BDRV_BLOCK_ZERO;
2371 }
2372 }
2373 }
2374
2375 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2376 local_file && local_file != bs &&
2377 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2378 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2379 int64_t file_pnum;
2380 int ret2;
2381
2382 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2383 *pnum, &file_pnum, NULL, NULL);
2384 if (ret2 >= 0) {
2385 /* Ignore errors. This is just providing extra information, it
2386 * is useful but not necessary.
2387 */
2388 if (ret2 & BDRV_BLOCK_EOF &&
2389 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2390 /*
2391 * It is valid for the format block driver to read
2392 * beyond the end of the underlying file's current
2393 * size; such areas read as zero.
2394 */
2395 ret |= BDRV_BLOCK_ZERO;
2396 } else {
2397 /* Limit request to the range reported by the protocol driver */
2398 *pnum = file_pnum;
2399 ret |= (ret2 & BDRV_BLOCK_ZERO);
2400 }
2401 }
2402 }
2403
2404 out:
2405 bdrv_dec_in_flight(bs);
2406 if (ret >= 0 && offset + *pnum == total_size) {
2407 ret |= BDRV_BLOCK_EOF;
2408 }
2409 early_out:
2410 if (file) {
2411 *file = local_file;
2412 }
2413 if (map) {
2414 *map = local_map;
2415 }
2416 return ret;
2417 }
2418
2419 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2420 BlockDriverState *base,
2421 bool want_zero,
2422 int64_t offset,
2423 int64_t bytes,
2424 int64_t *pnum,
2425 int64_t *map,
2426 BlockDriverState **file)
2427 {
2428 BlockDriverState *p;
2429 int ret = 0;
2430 bool first = true;
2431
2432 assert(bs != base);
2433 for (p = bs; p != base; p = backing_bs(p)) {
2434 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2435 file);
2436 if (ret < 0) {
2437 break;
2438 }
2439 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2440 /*
2441 * Reading beyond the end of the file continues to read
2442 * zeroes, but we can only widen the result to the
2443 * unallocated length we learned from an earlier
2444 * iteration.
2445 */
2446 *pnum = bytes;
2447 }
2448 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2449 break;
2450 }
2451 /* [offset, pnum] unallocated on this layer, which could be only
2452 * the first part of [offset, bytes]. */
2453 bytes = MIN(bytes, *pnum);
2454 first = false;
2455 }
2456 return ret;
2457 }
2458
2459 /* Coroutine wrapper for bdrv_block_status_above() */
2460 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
2461 {
2462 BdrvCoBlockStatusData *data = opaque;
2463
2464 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2465 data->want_zero,
2466 data->offset, data->bytes,
2467 data->pnum, data->map, data->file);
2468 data->done = true;
2469 aio_wait_kick();
2470 }
2471
2472 /*
2473 * Synchronous wrapper around bdrv_co_block_status_above().
2474 *
2475 * See bdrv_co_block_status_above() for details.
2476 */
2477 static int bdrv_common_block_status_above(BlockDriverState *bs,
2478 BlockDriverState *base,
2479 bool want_zero, int64_t offset,
2480 int64_t bytes, int64_t *pnum,
2481 int64_t *map,
2482 BlockDriverState **file)
2483 {
2484 Coroutine *co;
2485 BdrvCoBlockStatusData data = {
2486 .bs = bs,
2487 .base = base,
2488 .want_zero = want_zero,
2489 .offset = offset,
2490 .bytes = bytes,
2491 .pnum = pnum,
2492 .map = map,
2493 .file = file,
2494 .done = false,
2495 };
2496
2497 if (qemu_in_coroutine()) {
2498 /* Fast-path if already in coroutine context */
2499 bdrv_block_status_above_co_entry(&data);
2500 } else {
2501 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2502 bdrv_coroutine_enter(bs, co);
2503 BDRV_POLL_WHILE(bs, !data.done);
2504 }
2505 return data.ret;
2506 }
2507
2508 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2509 int64_t offset, int64_t bytes, int64_t *pnum,
2510 int64_t *map, BlockDriverState **file)
2511 {
2512 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2513 pnum, map, file);
2514 }
2515
2516 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2517 int64_t *pnum, int64_t *map, BlockDriverState **file)
2518 {
2519 return bdrv_block_status_above(bs, backing_bs(bs),
2520 offset, bytes, pnum, map, file);
2521 }
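
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * walking an image extent by extent. Each call advances by *pnum
 * bytes; per the contract documented above, *pnum is only 0 at
 * end-of-file or on error.
 */
static void example_dump_extents(BlockDriverState *bs)
{
    int64_t offset = 0, total = bdrv_getlength(bs);

    while (offset < total) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, total - offset,
                                    &pnum, &map, &file);
        if (ret < 0 || !pnum) {
            break; /* error, or end-of-file reached */
        }
        printf("%10" PRId64 " +%-10" PRId64 "%s%s\n", offset, pnum,
               ret & BDRV_BLOCK_DATA ? " data" : "",
               ret & BDRV_BLOCK_ZERO ? " zero" : "");
        offset += pnum;
    }
}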
2522
2523 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2524 int64_t bytes, int64_t *pnum)
2525 {
2526 int ret;
2527 int64_t dummy;
2528
2529 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2530 bytes, pnum ? pnum : &dummy, NULL,
2531 NULL);
2532 if (ret < 0) {
2533 return ret;
2534 }
2535 return !!(ret & BDRV_BLOCK_ALLOCATED);
2536 }
2537
2538 /*
2539 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2540 *
2541 * Return 1 if (a prefix of) the given range is allocated in any image
2542 * between BASE and TOP (BASE is only included if include_base is set).
2543 * BASE can be NULL to check if the given offset is allocated in any
2544 * image of the chain. Return 0 otherwise, or negative errno on
2545 * failure.
2546 *
2547 * 'pnum' is set to the number of bytes (including and immediately
2548 * following the specified offset) that are known to be in the same
2549 * allocated/unallocated state. Note that a subsequent call starting
2550 * at 'offset + *pnum' may return the same allocation status (in other
2551 * words, the result is not necessarily the maximum possible range);
2552 * but 'pnum' will only be 0 when end of file is reached.
2553 *
2554 */
2555 int bdrv_is_allocated_above(BlockDriverState *top,
2556 BlockDriverState *base,
2557 bool include_base, int64_t offset,
2558 int64_t bytes, int64_t *pnum)
2559 {
2560 BlockDriverState *intermediate;
2561 int ret;
2562 int64_t n = bytes;
2563
2564 assert(base || !include_base);
2565
2566 intermediate = top;
2567 while (include_base || intermediate != base) {
2568 int64_t pnum_inter;
2569 int64_t size_inter;
2570
2571 assert(intermediate);
2572 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2573 if (ret < 0) {
2574 return ret;
2575 }
2576 if (ret) {
2577 *pnum = pnum_inter;
2578 return 1;
2579 }
2580
2581 size_inter = bdrv_getlength(intermediate);
2582 if (size_inter < 0) {
2583 return size_inter;
2584 }
2585 if (n > pnum_inter &&
2586 (intermediate == top || offset + pnum_inter < size_inter)) {
2587 n = pnum_inter;
2588 }
2589
2590 if (intermediate == base) {
2591 break;
2592 }
2593
2594 intermediate = backing_bs(intermediate);
2595 }
2596
2597 *pnum = n;
2598 return 0;
2599 }
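
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * a commit/stream-style user asking whether it may skip a range
 * because no layer between 'top' and 'base' allocates it.
 */
static bool example_range_unallocated(BlockDriverState *top,
                                      BlockDriverState *base,
                                      int64_t offset, int64_t bytes)
{
    int64_t pnum;
    int ret = bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum);

    /* 0 means unallocated; pnum must still cover the whole range */
    return ret == 0 && pnum >= bytes;
}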
2600
2601 typedef struct BdrvVmstateCo {
2602 BlockDriverState *bs;
2603 QEMUIOVector *qiov;
2604 int64_t pos;
2605 bool is_read;
2606 int ret;
2607 } BdrvVmstateCo;
2608
2609 static int coroutine_fn
2610 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2611 bool is_read)
2612 {
2613 BlockDriver *drv = bs->drv;
2614 int ret = -ENOTSUP;
2615
2616 bdrv_inc_in_flight(bs);
2617
2618 if (!drv) {
2619 ret = -ENOMEDIUM;
2620 } else if (drv->bdrv_load_vmstate) {
2621 if (is_read) {
2622 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2623 } else {
2624 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2625 }
2626 } else if (bs->file) {
2627 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2628 }
2629
2630 bdrv_dec_in_flight(bs);
2631 return ret;
2632 }
2633
2634 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2635 {
2636 BdrvVmstateCo *co = opaque;
2637 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2638 aio_wait_kick();
2639 }
2640
2641 static inline int
2642 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2643 bool is_read)
2644 {
2645 if (qemu_in_coroutine()) {
2646 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2647 } else {
2648 BdrvVmstateCo data = {
2649 .bs = bs,
2650 .qiov = qiov,
2651 .pos = pos,
2652 .is_read = is_read,
2653 .ret = -EINPROGRESS,
2654 };
2655 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2656
2657 bdrv_coroutine_enter(bs, co);
2658 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2659 return data.ret;
2660 }
2661 }
2662
2663 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2664 int64_t pos, int size)
2665 {
2666 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2667 int ret;
2668
2669 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2670 if (ret < 0) {
2671 return ret;
2672 }
2673
2674 return size;
2675 }
2676
2677 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2678 {
2679 return bdrv_rw_vmstate(bs, qiov, pos, false);
2680 }
2681
2682 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2683 int64_t pos, int size)
2684 {
2685 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2686 int ret;
2687
2688 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2689 if (ret < 0) {
2690 return ret;
2691 }
2692
2693 return size;
2694 }
2695
2696 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2697 {
2698 return bdrv_rw_vmstate(bs, qiov, pos, true);
2699 }
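
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * migration code stores the VM state blob through these helpers; only
 * format drivers with a vmstate area (e.g. qcow2) accept it. On
 * success they return the byte count, on failure a negative errno.
 */
static int example_vmstate_roundtrip(BlockDriverState *bs)
{
    uint8_t out[512], in[512];
    int ret;

    memset(out, 0xaa, sizeof(out));
    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    if (ret < 0) {
        return ret;
    }
    return memcmp(in, out, sizeof(in)) == 0 ? 0 : -EIO;
}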
2700
2701 /**************************************************************/
2702 /* async I/Os */
2703
2704 void bdrv_aio_cancel(BlockAIOCB *acb)
2705 {
2706 qemu_aio_ref(acb);
2707 bdrv_aio_cancel_async(acb);
2708 while (acb->refcnt > 1) {
2709 if (acb->aiocb_info->get_aio_context) {
2710 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2711 } else if (acb->bs) {
2712 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2713 * assert that we're not using an I/O thread. Thread-safe
2714 * code should use bdrv_aio_cancel_async exclusively.
2715 */
2716 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2717 aio_poll(bdrv_get_aio_context(acb->bs), true);
2718 } else {
2719 abort();
2720 }
2721 }
2722 qemu_aio_unref(acb);
2723 }
2724
2725 /* Async version of aio cancel. The caller is not blocked if the acb implements
2726 * cancel_async; otherwise we do nothing and let the request complete normally.
2727 * In either case the completion callback must be called. */
2728 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2729 {
2730 if (acb->aiocb_info->cancel_async) {
2731 acb->aiocb_info->cancel_async(acb);
2732 }
2733 }
2734
2735 /**************************************************************/
2736 /* Coroutine block device emulation */
2737
2738 typedef struct FlushCo {
2739 BlockDriverState *bs;
2740 int ret;
2741 } FlushCo;
2742
2743
2744 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2745 {
2746 FlushCo *rwco = opaque;
2747
2748 rwco->ret = bdrv_co_flush(rwco->bs);
2749 aio_wait_kick();
2750 }
2751
2752 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2753 {
2754 int current_gen;
2755 int ret = 0;
2756
2757 bdrv_inc_in_flight(bs);
2758
2759 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2760 bdrv_is_sg(bs)) {
2761 goto early_exit;
2762 }
2763
2764 qemu_co_mutex_lock(&bs->reqs_lock);
2765 current_gen = atomic_read(&bs->write_gen);
2766
2767 /* Wait until any previous flushes are completed */
2768 while (bs->active_flush_req) {
2769 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2770 }
2771
2772 /* Flushes reach this point in nondecreasing current_gen order. */
2773 bs->active_flush_req = true;
2774 qemu_co_mutex_unlock(&bs->reqs_lock);
2775
2776 /* Write back all layers by calling one driver function */
2777 if (bs->drv->bdrv_co_flush) {
2778 ret = bs->drv->bdrv_co_flush(bs);
2779 goto out;
2780 }
2781
2782 /* Write back cached data to the OS even with cache=unsafe */
2783 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2784 if (bs->drv->bdrv_co_flush_to_os) {
2785 ret = bs->drv->bdrv_co_flush_to_os(bs);
2786 if (ret < 0) {
2787 goto out;
2788 }
2789 }
2790
2791 /* But don't actually force it to the disk with cache=unsafe */
2792 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2793 goto flush_parent;
2794 }
2795
2796 /* Check if we really need to flush anything */
2797 if (bs->flushed_gen == current_gen) {
2798 goto flush_parent;
2799 }
2800
2801 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2802 if (!bs->drv) {
2803 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2804 * (even in case of apparent success) */
2805 ret = -ENOMEDIUM;
2806 goto out;
2807 }
2808 if (bs->drv->bdrv_co_flush_to_disk) {
2809 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2810 } else if (bs->drv->bdrv_aio_flush) {
2811 BlockAIOCB *acb;
2812 CoroutineIOCompletion co = {
2813 .coroutine = qemu_coroutine_self(),
2814 };
2815
2816 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2817 if (acb == NULL) {
2818 ret = -EIO;
2819 } else {
2820 qemu_coroutine_yield();
2821 ret = co.ret;
2822 }
2823 } else {
2824 /*
2825 * Some block drivers always operate in either writethrough or unsafe
2826 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2827 * know how the server works (because the behaviour is hardcoded or
2828 * depends on server-side configuration), so we can't ensure that
2829 * everything is safe on disk. Returning an error doesn't work because
2830 * that would break guests even if the server operates in writethrough
2831 * mode.
2832 *
2833 * Let's hope the user knows what they're doing.
2834 */
2835 ret = 0;
2836 }
2837
2838 if (ret < 0) {
2839 goto out;
2840 }
2841
2842 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2843 * in the case of cache=unsafe, so there are no useless flushes.
2844 */
2845 flush_parent:
2846 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2847 out:
2848 /* Notify any pending flushes that we have completed */
2849 if (ret == 0) {
2850 bs->flushed_gen = current_gen;
2851 }
2852
2853 qemu_co_mutex_lock(&bs->reqs_lock);
2854 bs->active_flush_req = false;
2855 /* Return value is ignored - it's ok if wait queue is empty */
2856 qemu_co_queue_next(&bs->flush_queue);
2857 qemu_co_mutex_unlock(&bs->reqs_lock);
2858
2859 early_exit:
2860 bdrv_dec_in_flight(bs);
2861 return ret;
2862 }
2863
2864 int bdrv_flush(BlockDriverState *bs)
2865 {
2866 Coroutine *co;
2867 FlushCo flush_co = {
2868 .bs = bs,
2869 .ret = NOT_DONE,
2870 };
2871
2872 if (qemu_in_coroutine()) {
2873 /* Fast-path if already in coroutine context */
2874 bdrv_flush_co_entry(&flush_co);
2875 } else {
2876 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2877 bdrv_coroutine_enter(bs, co);
2878 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2879 }
2880
2881 return flush_co.ret;
2882 }
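
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * thanks to the coroutine wrapper above, bdrv_flush() is callable from
 * both coroutine and non-coroutine context. The write-generation check
 * in bdrv_co_flush() keeps back-to-back flushes cheap.
 */
static int example_flush_twice(BlockDriverState *bs)
{
    int ret = bdrv_flush(bs);

    if (ret < 0) {
        return ret;
    }
    /* No writes in between: this node short-circuits via flushed_gen */
    return bdrv_flush(bs);
}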
2883
2884 typedef struct DiscardCo {
2885 BdrvChild *child;
2886 int64_t offset;
2887 int64_t bytes;
2888 int ret;
2889 } DiscardCo;
2890 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2891 {
2892 DiscardCo *rwco = opaque;
2893
2894 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
2895 aio_wait_kick();
2896 }
2897
2898 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2899 int64_t bytes)
2900 {
2901 BdrvTrackedRequest req;
2902 int max_pdiscard, ret;
2903 int head, tail, align;
2904 BlockDriverState *bs = child->bs;
2905
2906 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2907 return -ENOMEDIUM;
2908 }
2909
2910 if (bdrv_has_readonly_bitmaps(bs)) {
2911 return -EPERM;
2912 }
2913
2914 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
2915 return -EIO;
2916 }
2917
2918 /* Do nothing if disabled. */
2919 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2920 return 0;
2921 }
2922
2923 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2924 return 0;
2925 }
2926
2927 /* Discard is advisory, but some devices track and coalesce
2928 * unaligned requests, so we must pass everything down rather than
2929 * round here. Still, most devices will just silently ignore
2930 * unaligned requests (by returning -ENOTSUP), so we must fragment
2931 * the request accordingly. */
2932 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2933 assert(align % bs->bl.request_alignment == 0);
2934 head = offset % align;
2935 tail = (offset + bytes) % align;
2936
2937 bdrv_inc_in_flight(bs);
2938 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2939
2940 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2941 if (ret < 0) {
2942 goto out;
2943 }
2944
2945 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2946 align);
2947 assert(max_pdiscard >= bs->bl.request_alignment);
2948
2949 while (bytes > 0) {
2950 int64_t num = bytes;
2951
2952 if (head) {
2953 /* Make small requests to get to alignment boundaries. */
2954 num = MIN(bytes, align - head);
2955 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2956 num %= bs->bl.request_alignment;
2957 }
2958 head = (head + num) % align;
2959 assert(num < max_pdiscard);
2960 } else if (tail) {
2961 if (num > align) {
2962 /* Shorten the request to the last aligned cluster. */
2963 num -= tail;
2964 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2965 tail > bs->bl.request_alignment) {
2966 tail %= bs->bl.request_alignment;
2967 num -= tail;
2968 }
2969 }
2970 /* limit request size */
2971 if (num > max_pdiscard) {
2972 num = max_pdiscard;
2973 }
2974
2975 if (!bs->drv) {
2976 ret = -ENOMEDIUM;
2977 goto out;
2978 }
2979 if (bs->drv->bdrv_co_pdiscard) {
2980 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2981 } else {
2982 BlockAIOCB *acb;
2983 CoroutineIOCompletion co = {
2984 .coroutine = qemu_coroutine_self(),
2985 };
2986
2987 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2988 bdrv_co_io_em_complete, &co);
2989 if (acb == NULL) {
2990 ret = -EIO;
2991 goto out;
2992 } else {
2993 qemu_coroutine_yield();
2994 ret = co.ret;
2995 }
2996 }
2997 if (ret && ret != -ENOTSUP) {
2998 goto out;
2999 }
3000
3001 offset += num;
3002 bytes -= num;
3003 }
3004 ret = 0;
3005 out:
3006 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3007 tracked_request_end(&req);
3008 bdrv_dec_in_flight(bs);
3009 return ret;
3010 }
3011
3012 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
3013 {
3014 Coroutine *co;
3015 DiscardCo rwco = {
3016 .child = child,
3017 .offset = offset,
3018 .bytes = bytes,
3019 .ret = NOT_DONE,
3020 };
3021
3022 if (qemu_in_coroutine()) {
3023 /* Fast-path if already in coroutine context */
3024 bdrv_pdiscard_co_entry(&rwco);
3025 } else {
3026 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
3027 bdrv_coroutine_enter(child->bs, co);
3028 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
3029 }
3030
3031 return rwco.ret;
3032 }
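
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * discard is advisory. The call silently becomes a no-op when the node
 * lacks BDRV_O_UNMAP or the driver implements neither discard
 * callback, so callers need not probe for support first.
 */
static int example_discard_range(BdrvChild *child, int64_t offset,
                                 int64_t bytes)
{
    /* -ENOTSUP from the driver is already swallowed by the loop above */
    return bdrv_pdiscard(child, offset, bytes);
}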
3033
3034 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3035 {
3036 BlockDriver *drv = bs->drv;
3037 CoroutineIOCompletion co = {
3038 .coroutine = qemu_coroutine_self(),
3039 };
3040 BlockAIOCB *acb;
3041
3042 bdrv_inc_in_flight(bs);
3043 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3044 co.ret = -ENOTSUP;
3045 goto out;
3046 }
3047
3048 if (drv->bdrv_co_ioctl) {
3049 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3050 } else {
3051 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3052 if (!acb) {
3053 co.ret = -ENOTSUP;
3054 goto out;
3055 }
3056 qemu_coroutine_yield();
3057 }
3058 out:
3059 bdrv_dec_in_flight(bs);
3060 return co.ret;
3061 }
3062
3063 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3064 {
3065 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3066 }
3067
3068 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3069 {
3070 return memset(qemu_blockalign(bs, size), 0, size);
3071 }
3072
3073 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3074 {
3075 size_t align = bdrv_opt_mem_align(bs);
3076
3077 /* Ensure that NULL is never returned on success */
3078 assert(align > 0);
3079 if (size == 0) {
3080 size = align;
3081 }
3082
3083 return qemu_try_memalign(align, size);
3084 }
3085
3086 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3087 {
3088 void *mem = qemu_try_blockalign(bs, size);
3089
3090 if (mem) {
3091 memset(mem, 0, size);
3092 }
3093
3094 return mem;
3095 }
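
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * bounce buffers for O_DIRECT-capable nodes must respect the node's
 * memory alignment, hence these helpers rather than g_malloc(). The
 * _try_ variants return NULL on failure; the plain ones abort.
 */
static int example_with_bounce_buffer(BlockDriverState *bs, size_t size)
{
    uint8_t *buf = qemu_try_blockalign0(bs, size);

    if (!buf) {
        return -ENOMEM;
    }
    /* ... fill buf and issue properly aligned I/O with it ... */
    qemu_vfree(buf);
    return 0;
}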
3096
3097 /*
3098 * Check if all memory in this vector satisfies the node's minimum
3099 * memory alignment (bdrv_min_mem_align()).
3099 */
3100 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
3101 {
3102 int i;
3103 size_t alignment = bdrv_min_mem_align(bs);
3104
3105 for (i = 0; i < qiov->niov; i++) {
3106 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
3107 return false;
3108 }
3109 if (qiov->iov[i].iov_len % alignment) {
3110 return false;
3111 }
3112 }
3113
3114 return true;
3115 }
3116
3117 void bdrv_add_before_write_notifier(BlockDriverState *bs,
3118 NotifierWithReturn *notifier)
3119 {
3120 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
3121 }
3122
3123 void bdrv_io_plug(BlockDriverState *bs)
3124 {
3125 BdrvChild *child;
3126
3127 QLIST_FOREACH(child, &bs->children, next) {
3128 bdrv_io_plug(child->bs);
3129 }
3130
3131 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
3132 BlockDriver *drv = bs->drv;
3133 if (drv && drv->bdrv_io_plug) {
3134 drv->bdrv_io_plug(bs);
3135 }
3136 }
3137 }
3138
3139 void bdrv_io_unplug(BlockDriverState *bs)
3140 {
3141 BdrvChild *child;
3142
3143 assert(bs->io_plugged);
3144 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
3145 BlockDriver *drv = bs->drv;
3146 if (drv && drv->bdrv_io_unplug) {
3147 drv->bdrv_io_unplug(bs);
3148 }
3149 }
3150
3151 QLIST_FOREACH(child, &bs->children, next) {
3152 bdrv_io_unplug(child->bs);
3153 }
3154 }
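
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * plug/unplug must come in pairs and bracket a burst of requests so
 * drivers with batch submission (e.g. linux-aio) can issue them in one
 * syscall. submit_one() is a hypothetical per-request helper.
 */
static void example_batched_submission(BlockDriverState *bs, int n)
{
    int i;

    bdrv_io_plug(bs);
    for (i = 0; i < n; i++) {
        submit_one(bs, i); /* hypothetical request submission */
    }
    bdrv_io_unplug(bs); /* the last unplug lets the driver flush the batch */
}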
3155
3156 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
3157 {
3158 BdrvChild *child;
3159
3160 if (bs->drv && bs->drv->bdrv_register_buf) {
3161 bs->drv->bdrv_register_buf(bs, host, size);
3162 }
3163 QLIST_FOREACH(child, &bs->children, next) {
3164 bdrv_register_buf(child->bs, host, size);
3165 }
3166 }
3167
3168 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
3169 {
3170 BdrvChild *child;
3171
3172 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3173 bs->drv->bdrv_unregister_buf(bs, host);
3174 }
3175 QLIST_FOREACH(child, &bs->children, next) {
3176 bdrv_unregister_buf(child->bs, host);
3177 }
3178 }
3179
3180 static int coroutine_fn bdrv_co_copy_range_internal(
3181 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
3182 uint64_t dst_offset, uint64_t bytes,
3183 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3184 bool recurse_src)
3185 {
3186 BdrvTrackedRequest req;
3187 int ret;
3188
3189 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3190 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3191 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3192
3193 if (!dst || !dst->bs) {
3194 return -ENOMEDIUM;
3195 }
3196 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
3197 if (ret) {
3198 return ret;
3199 }
3200 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3201 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3202 }
3203
3204 if (!src || !src->bs) {
3205 return -ENOMEDIUM;
3206 }
3207 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
3208 if (ret) {
3209 return ret;
3210 }
3211
3212 if (!src->bs->drv->bdrv_co_copy_range_from
3213 || !dst->bs->drv->bdrv_co_copy_range_to
3214 || src->bs->encrypted || dst->bs->encrypted) {
3215 return -ENOTSUP;
3216 }
3217
3218 if (recurse_src) {
3219 bdrv_inc_in_flight(src->bs);
3220 tracked_request_begin(&req, src->bs, src_offset, bytes,
3221 BDRV_TRACKED_READ);
3222
3223 /* BDRV_REQ_SERIALISING is only for write operations */
3224 assert(!(read_flags & BDRV_REQ_SERIALISING));
3225 bdrv_wait_serialising_requests(&req);
3226
3227 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3228 src, src_offset,
3229 dst, dst_offset,
3230 bytes,
3231 read_flags, write_flags);
3232
3233 tracked_request_end(&req);
3234 bdrv_dec_in_flight(src->bs);
3235 } else {
3236 bdrv_inc_in_flight(dst->bs);
3237 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3238 BDRV_TRACKED_WRITE);
3239 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3240 write_flags);
3241 if (!ret) {
3242 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3243 src, src_offset,
3244 dst, dst_offset,
3245 bytes,
3246 read_flags, write_flags);
3247 }
3248 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3249 tracked_request_end(&req);
3250 bdrv_dec_in_flight(dst->bs);
3251 }
3252
3253 return ret;
3254 }
3255
3256 /* Copy range from @src to @dst.
3257 *
3258 * See the comment of bdrv_co_copy_range for the parameter and return value
3259 * semantics. */
3260 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3261 BdrvChild *dst, uint64_t dst_offset,
3262 uint64_t bytes,
3263 BdrvRequestFlags read_flags,
3264 BdrvRequestFlags write_flags)
3265 {
3266 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3267 read_flags, write_flags);
3268 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3269 bytes, read_flags, write_flags, true);
3270 }
3271
3272 /* Copy range from @src to @dst.
3273 *
3274 * See the comment of bdrv_co_copy_range for the parameter and return value
3275 * semantics. */
3276 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3277 BdrvChild *dst, uint64_t dst_offset,
3278 uint64_t bytes,
3279 BdrvRequestFlags read_flags,
3280 BdrvRequestFlags write_flags)
3281 {
3282 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3283 read_flags, write_flags);
3284 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3285 bytes, read_flags, write_flags, false);
3286 }
3287
3288 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3289 BdrvChild *dst, uint64_t dst_offset,
3290 uint64_t bytes, BdrvRequestFlags read_flags,
3291 BdrvRequestFlags write_flags)
3292 {
3293 return bdrv_co_copy_range_from(src, src_offset,
3294 dst, dst_offset,
3295 bytes, read_flags, write_flags);
3296 }
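
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * offloaded copy in coroutine context. -ENOTSUP means one side lacks
 * copy_range support (or is encrypted); callers then fall back to a
 * bounce-buffer read/write loop.
 */
static int coroutine_fn example_offloaded_copy(BdrvChild *src, BdrvChild *dst,
                                               uint64_t bytes)
{
    return bdrv_co_copy_range(src, 0, dst, 0, bytes, 0, 0);
}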
3297
3298 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3299 {
3300 BdrvChild *c;
3301 QLIST_FOREACH(c, &bs->parents, next_parent) {
3302 if (c->role->resize) {
3303 c->role->resize(c);
3304 }
3305 }
3306 }
3307
3308 /**
3309 * Truncate file to 'offset' bytes (needed only for file protocols)
3310 *
3311 * If 'exact' is true, the file must be resized to exactly the given
3312 * 'offset'. Otherwise, it is sufficient for the node to be at least
3313 * 'offset' bytes in length.
3314 */
3315 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3316 PreallocMode prealloc, Error **errp)
3317 {
3318 BlockDriverState *bs = child->bs;
3319 BlockDriver *drv = bs->drv;
3320 BdrvTrackedRequest req;
3321 int64_t old_size, new_bytes;
3322 int ret;
3323
3324
3325 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3326 if (!drv) {
3327 error_setg(errp, "No medium inserted");
3328 return -ENOMEDIUM;
3329 }
3330 if (offset < 0) {
3331 error_setg(errp, "Image size cannot be negative");
3332 return -EINVAL;
3333 }
3334
3335 old_size = bdrv_getlength(bs);
3336 if (old_size < 0) {
3337 error_setg_errno(errp, -old_size, "Failed to get old image size");
3338 return old_size;
3339 }
3340
3341 if (offset > old_size) {
3342 new_bytes = offset - old_size;
3343 } else {
3344 new_bytes = 0;
3345 }
3346
3347 bdrv_inc_in_flight(bs);
3348 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3349 BDRV_TRACKED_TRUNCATE);
3350
3351 /* If we are growing the image and potentially using preallocation for the
3352 * new area, we need to make sure that no write requests are made to it
3353 * concurrently or they might be overwritten by preallocation. */
3354 if (new_bytes) {
3355 bdrv_mark_request_serialising(&req, 1);
3356 }
3357 if (bs->read_only) {
3358 error_setg(errp, "Image is read-only");
3359 ret = -EACCES;
3360 goto out;
3361 }
3362 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3363 0);
3364 if (ret < 0) {
3365 error_setg_errno(errp, -ret,
3366 "Failed to prepare request for truncation");
3367 goto out;
3368 }
3369
3370 if (drv->bdrv_co_truncate) {
3371 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, errp);
3372 } else if (bs->file && drv->is_filter) {
3373 ret = bdrv_co_truncate(bs->file, offset, exact, prealloc, errp);
3374 } else {
3375 error_setg(errp, "Image format driver does not support resize");
3376 ret = -ENOTSUP;
3377 goto out;
3378 }
3379 if (ret < 0) {
3380 goto out;
3381 }
3382
3383 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3384 if (ret < 0) {
3385 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3386 } else {
3387 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3388 }
3389 /* It's possible that truncation succeeded but refresh_total_sectors
3390 * failed, but the latter doesn't affect how we should finish the request.
3391 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3392 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3393
3394 out:
3395 tracked_request_end(&req);
3396 bdrv_dec_in_flight(bs);
3397
3398 return ret;
3399 }
3400
3401 typedef struct TruncateCo {
3402 BdrvChild *child;
3403 int64_t offset;
3404 bool exact;
3405 PreallocMode prealloc;
3406 Error **errp;
3407 int ret;
3408 } TruncateCo;
3409
3410 static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3411 {
3412 TruncateCo *tco = opaque;
3413 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->exact,
3414 tco->prealloc, tco->errp);
3415 aio_wait_kick();
3416 }
3417
3418 int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
3419 PreallocMode prealloc, Error **errp)
3420 {
3421 Coroutine *co;
3422 TruncateCo tco = {
3423 .child = child,
3424 .offset = offset,
3425 .exact = exact,
3426 .prealloc = prealloc,
3427 .errp = errp,
3428 .ret = NOT_DONE,
3429 };
3430
3431 if (qemu_in_coroutine()) {
3432 /* Fast-path if already in coroutine context */
3433 bdrv_truncate_co_entry(&tco);
3434 } else {
3435 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3436 bdrv_coroutine_enter(child->bs, co);
3437 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3438 }
3439
3440 return tco.ret;
3441 }
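
/*
 * Editor's example (illustrative sketch, not part of upstream io.c):
 * growing an image without preallocation. exact=false permits
 * protocols that can only resize in coarser units to overshoot the
 * requested size.
 */
static int example_grow_image(BdrvChild *child, int64_t grow_by, Error **errp)
{
    int64_t size = bdrv_getlength(child->bs);

    if (size < 0) {
        return size;
    }
    return bdrv_truncate(child, size + grow_by, false, PREALLOC_MODE_OFF,
                         errp);
}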