1 /*
2 * Block layer I/O functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "block/coroutines.h"
33 #include "block/dirty-bitmap.h"
34 #include "block/write-threshold.h"
35 #include "qemu/cutils.h"
36 #include "qemu/memalign.h"
37 #include "qapi/error.h"
38 #include "qemu/error-report.h"
39 #include "qemu/main-loop.h"
40 #include "sysemu/replay.h"
41
42 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
43 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
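/* With BDRV_SECTOR_BITS == 9 this works out to 32768 * 512 bytes, i.e. 16 MiB. */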
44
45 static void bdrv_parent_cb_resize(BlockDriverState *bs);
46 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
47 int64_t offset, int64_t bytes, BdrvRequestFlags flags);
48
49 static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
50 {
51 BdrvChild *c, *next;
52
53 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
54 if (c == ignore) {
55 continue;
56 }
57 bdrv_parent_drained_begin_single(c);
58 }
59 }
60
61 void bdrv_parent_drained_end_single(BdrvChild *c)
62 {
63 GLOBAL_STATE_CODE();
64
65 assert(c->quiesced_parent);
66 c->quiesced_parent = false;
67
68 if (c->klass->drained_end) {
69 c->klass->drained_end(c);
70 }
71 }
72
73 static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
74 {
75 BdrvChild *c;
76
77 QLIST_FOREACH(c, &bs->parents, next_parent) {
78 if (c == ignore) {
79 continue;
80 }
81 bdrv_parent_drained_end_single(c);
82 }
83 }
84
85 bool bdrv_parent_drained_poll_single(BdrvChild *c)
86 {
87 if (c->klass->drained_poll) {
88 return c->klass->drained_poll(c);
89 }
90 return false;
91 }
92
93 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
94 bool ignore_bds_parents)
95 {
96 BdrvChild *c, *next;
97 bool busy = false;
98
99 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
100 if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
101 continue;
102 }
103 busy |= bdrv_parent_drained_poll_single(c);
104 }
105
106 return busy;
107 }
108
109 void bdrv_parent_drained_begin_single(BdrvChild *c)
110 {
111 GLOBAL_STATE_CODE();
112
113 assert(!c->quiesced_parent);
114 c->quiesced_parent = true;
115
116 if (c->klass->drained_begin) {
117 c->klass->drained_begin(c);
118 }
119 }
120
121 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
122 {
123 dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
124 src->pdiscard_alignment);
125 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
126 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
127 dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
128 src->max_hw_transfer);
129 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
130 src->opt_mem_alignment);
131 dst->min_mem_alignment = MAX(dst->min_mem_alignment,
132 src->min_mem_alignment);
133 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
134 dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
135 }
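
/*
 * Worked example for the merge rules above (values invented for
 * illustration): if one child reports max_transfer == 64 KiB and another
 * 128 KiB, the merged limit is the stricter MIN_NON_ZERO, 64 KiB; if they
 * report opt_transfer hints of 4 KiB and 64 KiB, the merged hint is the
 * larger MAX, 64 KiB. Thanks to MIN_NON_ZERO, a zero field (meaning "no
 * limit") never masks a real limit reported by the other child.
 */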
136
137 typedef struct BdrvRefreshLimitsState {
138 BlockDriverState *bs;
139 BlockLimits old_bl;
140 } BdrvRefreshLimitsState;
141
142 static void bdrv_refresh_limits_abort(void *opaque)
143 {
144 BdrvRefreshLimitsState *s = opaque;
145
146 s->bs->bl = s->old_bl;
147 }
148
149 static TransactionActionDrv bdrv_refresh_limits_drv = {
150 .abort = bdrv_refresh_limits_abort,
151 .clean = g_free,
152 };
153
154 /* @tran is allowed to be NULL; in this case no rollback is possible. */
155 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
156 {
157 ERRP_GUARD();
158 BlockDriver *drv = bs->drv;
159 BdrvChild *c;
160 bool have_limits;
161
162 GLOBAL_STATE_CODE();
163
164 if (tran) {
165 BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
166 *s = (BdrvRefreshLimitsState) {
167 .bs = bs,
168 .old_bl = bs->bl,
169 };
170 tran_add(tran, &bdrv_refresh_limits_drv, s);
171 }
172
173 memset(&bs->bl, 0, sizeof(bs->bl));
174
175 if (!drv) {
176 return;
177 }
178
179 /* Default alignment based on whether driver has byte interface */
180 bs->bl.request_alignment = (drv->bdrv_co_preadv ||
181 drv->bdrv_aio_preadv ||
182 drv->bdrv_co_preadv_part) ? 1 : 512;
183
184 /* Take some limits from the children as a default */
185 have_limits = false;
186 QLIST_FOREACH(c, &bs->children, next) {
187 if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
188 {
189 bdrv_merge_limits(&bs->bl, &c->bs->bl);
190 have_limits = true;
191 }
192
193 if (c->role & BDRV_CHILD_FILTERED) {
194 bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
195 }
196 }
197
198 if (!have_limits) {
199 bs->bl.min_mem_alignment = 512;
200 bs->bl.opt_mem_alignment = qemu_real_host_page_size();
201
202 /* Safe default since most protocols use readv()/writev()/etc */
203 bs->bl.max_iov = IOV_MAX;
204 }
205
206 /* Then let the driver override it */
207 if (drv->bdrv_refresh_limits) {
208 drv->bdrv_refresh_limits(bs, errp);
209 if (*errp) {
210 return;
211 }
212 }
213
214 if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
215 error_setg(errp, "Driver requires too large request alignment");
216 }
217 }
218
219 /**
220 * The copy-on-read flag is actually a reference count so multiple users may
221 * use the feature without worrying about clobbering its previous state.
222 * Copy-on-read stays enabled until all users have disabled it again.
223 */
224 void bdrv_enable_copy_on_read(BlockDriverState *bs)
225 {
226 IO_CODE();
227 qatomic_inc(&bs->copy_on_read);
228 }
229
230 void bdrv_disable_copy_on_read(BlockDriverState *bs)
231 {
232 int old = qatomic_fetch_dec(&bs->copy_on_read);
233 IO_CODE();
234 assert(old >= 1);
235 }
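
/*
 * Illustrative pairing (a sketch, not a call site in this file): a user
 * such as a block job enables copy-on-read for the duration of its work
 * and disables it again when done, relying on the reference count above:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... reads that should populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 */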
236
237 typedef struct {
238 Coroutine *co;
239 BlockDriverState *bs;
240 bool done;
241 bool begin;
242 bool poll;
243 BdrvChild *parent;
244 } BdrvCoDrainData;
245
246 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
247 bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
248 bool ignore_bds_parents)
249 {
250 GLOBAL_STATE_CODE();
251
252 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
253 return true;
254 }
255
256 if (qatomic_read(&bs->in_flight)) {
257 return true;
258 }
259
260 return false;
261 }
262
263 static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
264 BdrvChild *ignore_parent)
265 {
266 return bdrv_drain_poll(bs, ignore_parent, false);
267 }
268
269 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
270 bool poll);
271 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
272
273 static void bdrv_co_drain_bh_cb(void *opaque)
274 {
275 BdrvCoDrainData *data = opaque;
276 Coroutine *co = data->co;
277 BlockDriverState *bs = data->bs;
278
279 if (bs) {
280 AioContext *ctx = bdrv_get_aio_context(bs);
281 aio_context_acquire(ctx);
282 bdrv_dec_in_flight(bs);
283 if (data->begin) {
284 bdrv_do_drained_begin(bs, data->parent, data->poll);
285 } else {
286 assert(!data->poll);
287 bdrv_do_drained_end(bs, data->parent);
288 }
289 aio_context_release(ctx);
290 } else {
291 assert(data->begin);
292 bdrv_drain_all_begin();
293 }
294
295 data->done = true;
296 aio_co_wake(co);
297 }
298
299 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
300 bool begin,
301 BdrvChild *parent,
302 bool poll)
303 {
304 BdrvCoDrainData data;
305 Coroutine *self = qemu_coroutine_self();
306 AioContext *ctx = bdrv_get_aio_context(bs);
307 AioContext *co_ctx = qemu_coroutine_get_aio_context(self);
308
309 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
310 * other coroutines run if they were queued by aio_co_enter(). */
311
312 assert(qemu_in_coroutine());
313 data = (BdrvCoDrainData) {
314 .co = self,
315 .bs = bs,
316 .done = false,
317 .begin = begin,
318 .parent = parent,
319 .poll = poll,
320 };
321
322 if (bs) {
323 bdrv_inc_in_flight(bs);
324 }
325
326 /*
327 * Temporarily drop the lock across yield or we would get deadlocks.
328 * bdrv_co_drain_bh_cb() reacquires the lock as needed.
329 *
330 * When we yield below, the lock for the current context will be
331 * released, so if this is actually the lock that protects bs, don't drop
332 * it a second time.
333 */
334 if (ctx != co_ctx) {
335 aio_context_release(ctx);
336 }
337 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
338 bdrv_co_drain_bh_cb, &data);
339
340 qemu_coroutine_yield();
341 /* If we are resumed from some other event (such as an aio completion or a
342 * timer callback), it is a bug in the caller that should be fixed. */
343 assert(data.done);
344
345 /* Reacquire the AioContext of bs if we dropped it */
346 if (ctx != co_ctx) {
347 aio_context_acquire(ctx);
348 }
349 }
350
351 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
352 bool poll)
353 {
354 IO_OR_GS_CODE();
355
356 if (qemu_in_coroutine()) {
357 bdrv_co_yield_to_drain(bs, true, parent, poll);
358 return;
359 }
360
361 GLOBAL_STATE_CODE();
362
363 /* Stop things in parent-to-child order */
364 if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
365 aio_disable_external(bdrv_get_aio_context(bs));
366 bdrv_parent_drained_begin(bs, parent);
367 if (bs->drv && bs->drv->bdrv_drain_begin) {
368 bs->drv->bdrv_drain_begin(bs);
369 }
370 }
371
372 /*
373 * Wait for drained requests to finish.
374 *
375 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
376 * call is needed so things in this AioContext can make progress even
377 * though we don't return to the main AioContext loop - this automatically
378 * includes other nodes in the same AioContext and therefore all child
379 * nodes.
380 */
381 if (poll) {
382 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
383 }
384 }
385
386 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
387 {
388 bdrv_do_drained_begin(bs, parent, false);
389 }
390
391 void bdrv_drained_begin(BlockDriverState *bs)
392 {
393 IO_OR_GS_CODE();
394 bdrv_do_drained_begin(bs, NULL, true);
395 }
396
397 /**
398 * This function does not poll, nor must any of its recursively called
399 * functions.
400 */
401 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
402 {
403 int old_quiesce_counter;
404
405 IO_OR_GS_CODE();
406
407 if (qemu_in_coroutine()) {
408 bdrv_co_yield_to_drain(bs, false, parent, false);
409 return;
410 }
411 assert(bs->quiesce_counter > 0);
412 GLOBAL_STATE_CODE();
413
414 /* Re-enable things in child-to-parent order */
415 old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
416 if (old_quiesce_counter == 1) {
417 if (bs->drv && bs->drv->bdrv_drain_end) {
418 bs->drv->bdrv_drain_end(bs);
419 }
420 bdrv_parent_drained_end(bs, parent);
421 aio_enable_external(bdrv_get_aio_context(bs));
422 }
423 }
424
425 void bdrv_drained_end(BlockDriverState *bs)
426 {
427 IO_OR_GS_CODE();
428 bdrv_do_drained_end(bs, NULL);
429 }
430
431 void bdrv_drain(BlockDriverState *bs)
432 {
433 IO_OR_GS_CODE();
434 bdrv_drained_begin(bs);
435 bdrv_drained_end(bs);
436 }
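
/*
 * Typical drained-section usage (a sketch; the exact call sites vary):
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests can be submitted and none are in flight,
 *     so the node or graph state can be safely manipulated here ...
 *     bdrv_drained_end(bs);
 */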
437
438 static void bdrv_drain_assert_idle(BlockDriverState *bs)
439 {
440 BdrvChild *child, *next;
441
442 assert(qatomic_read(&bs->in_flight) == 0);
443 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
444 bdrv_drain_assert_idle(child->bs);
445 }
446 }
447
448 unsigned int bdrv_drain_all_count = 0;
449
450 static bool bdrv_drain_all_poll(void)
451 {
452 BlockDriverState *bs = NULL;
453 bool result = false;
454 GLOBAL_STATE_CODE();
455
456 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
457 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
458 while ((bs = bdrv_next_all_states(bs))) {
459 AioContext *aio_context = bdrv_get_aio_context(bs);
460 aio_context_acquire(aio_context);
461 result |= bdrv_drain_poll(bs, NULL, true);
462 aio_context_release(aio_context);
463 }
464
465 return result;
466 }
467
468 /*
469 * Wait for pending requests to complete across all BlockDriverStates
470 *
471 * This function does not flush data to disk, use bdrv_flush_all() for that
472 * after calling this function.
473 *
474 * This pauses all block jobs and disables external clients. It must
475 * be paired with bdrv_drain_all_end().
476 *
477 * NOTE: no new block jobs or BlockDriverStates can be created between
478 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
479 */
480 void bdrv_drain_all_begin_nopoll(void)
481 {
482 BlockDriverState *bs = NULL;
483 GLOBAL_STATE_CODE();
484
485 /*
486 * The bdrv queue is managed by record/replay;
487 * waiting for the I/O requests to finish may
488 * never terminate.
489 */
490 if (replay_events_enabled()) {
491 return;
492 }
493
494 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
495 * loop AioContext, so make sure we're in the main context. */
496 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
497 assert(bdrv_drain_all_count < INT_MAX);
498 bdrv_drain_all_count++;
499
500 /* Quiesce all nodes, without polling in-flight requests yet. The graph
501 * cannot change during this loop. */
502 while ((bs = bdrv_next_all_states(bs))) {
503 AioContext *aio_context = bdrv_get_aio_context(bs);
504
505 aio_context_acquire(aio_context);
506 bdrv_do_drained_begin(bs, NULL, false);
507 aio_context_release(aio_context);
508 }
509 }
510
511 void bdrv_drain_all_begin(void)
512 {
513 BlockDriverState *bs = NULL;
514
515 if (qemu_in_coroutine()) {
516 bdrv_co_yield_to_drain(NULL, true, NULL, true);
517 return;
518 }
519
520 /*
521 * The bdrv queue is managed by record/replay;
522 * waiting for the I/O requests to finish may
523 * never terminate.
524 */
525 if (replay_events_enabled()) {
526 return;
527 }
528
529 bdrv_drain_all_begin_nopoll();
530
531 /* Now poll the in-flight requests */
532 AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());
533
534 while ((bs = bdrv_next_all_states(bs))) {
535 bdrv_drain_assert_idle(bs);
536 }
537 }
538
539 void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
540 {
541 GLOBAL_STATE_CODE();
542
543 g_assert(bs->quiesce_counter > 0);
544 g_assert(!bs->refcnt);
545
546 while (bs->quiesce_counter) {
547 bdrv_do_drained_end(bs, NULL);
548 }
549 }
550
551 void bdrv_drain_all_end(void)
552 {
553 BlockDriverState *bs = NULL;
554 GLOBAL_STATE_CODE();
555
556 /*
557 * The bdrv queue is managed by record/replay;
558 * waiting for the I/O requests to finish may
559 * never terminate.
560 */
561 if (replay_events_enabled()) {
562 return;
563 }
564
565 while ((bs = bdrv_next_all_states(bs))) {
566 AioContext *aio_context = bdrv_get_aio_context(bs);
567
568 aio_context_acquire(aio_context);
569 bdrv_do_drained_end(bs, NULL);
570 aio_context_release(aio_context);
571 }
572
573 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
574 assert(bdrv_drain_all_count > 0);
575 bdrv_drain_all_count--;
576 }
577
578 void bdrv_drain_all(void)
579 {
580 GLOBAL_STATE_CODE();
581 bdrv_drain_all_begin();
582 bdrv_drain_all_end();
583 }
584
585 /**
586 * Remove an active request from the tracked requests list
587 *
588 * This function should be called when a tracked request is completing.
589 */
590 static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
591 {
592 if (req->serialising) {
593 qatomic_dec(&req->bs->serialising_in_flight);
594 }
595
596 qemu_co_mutex_lock(&req->bs->reqs_lock);
597 QLIST_REMOVE(req, list);
598 qemu_co_queue_restart_all(&req->wait_queue);
599 qemu_co_mutex_unlock(&req->bs->reqs_lock);
600 }
601
602 /**
603 * Add an active request to the tracked requests list
604 */
605 static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
606 BlockDriverState *bs,
607 int64_t offset,
608 int64_t bytes,
609 enum BdrvTrackedRequestType type)
610 {
611 bdrv_check_request(offset, bytes, &error_abort);
612
613 *req = (BdrvTrackedRequest){
614 .bs = bs,
615 .offset = offset,
616 .bytes = bytes,
617 .type = type,
618 .co = qemu_coroutine_self(),
619 .serialising = false,
620 .overlap_offset = offset,
621 .overlap_bytes = bytes,
622 };
623
624 qemu_co_queue_init(&req->wait_queue);
625
626 qemu_co_mutex_lock(&bs->reqs_lock);
627 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
628 qemu_co_mutex_unlock(&bs->reqs_lock);
629 }
630
631 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
632 int64_t offset, int64_t bytes)
633 {
634 bdrv_check_request(offset, bytes, &error_abort);
635
636 /* aaaa bbbb */
637 if (offset >= req->overlap_offset + req->overlap_bytes) {
638 return false;
639 }
640 /* bbbb aaaa */
641 if (req->overlap_offset >= offset + bytes) {
642 return false;
643 }
644 return true;
645 }
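
/*
 * Worked example (invented values): with req->overlap_offset == 4096 and
 * req->overlap_bytes == 4096, the request covers the half-open interval
 * [4096, 8192). A query with offset == 8192, bytes == 512 fails the first
 * test (8192 >= 8192) and does not overlap, while offset == 8191,
 * bytes == 1 passes both tests and overlaps.
 */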
646
647 /* Called with self->bs->reqs_lock held */
648 static coroutine_fn BdrvTrackedRequest *
649 bdrv_find_conflicting_request(BdrvTrackedRequest *self)
650 {
651 BdrvTrackedRequest *req;
652
653 QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
654 if (req == self || (!req->serialising && !self->serialising)) {
655 continue;
656 }
657 if (tracked_request_overlaps(req, self->overlap_offset,
658 self->overlap_bytes))
659 {
660 /*
661 * Hitting this means there was a reentrant request, for
662 * example, a block driver issuing nested requests. This must
663 * never happen since it means deadlock.
664 */
665 assert(qemu_coroutine_self() != req->co);
666
667 /*
668 * If the request is already (indirectly) waiting for us, or
669 * will wait for us as soon as it wakes up, then just go on
670 * (instead of producing a deadlock in the former case).
671 */
672 if (!req->waiting_for) {
673 return req;
674 }
675 }
676 }
677
678 return NULL;
679 }
680
681 /* Called with self->bs->reqs_lock held */
682 static void coroutine_fn
683 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
684 {
685 BdrvTrackedRequest *req;
686
687 while ((req = bdrv_find_conflicting_request(self))) {
688 self->waiting_for = req;
689 qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
690 self->waiting_for = NULL;
691 }
692 }
693
694 /* Called with req->bs->reqs_lock held */
695 static void tracked_request_set_serialising(BdrvTrackedRequest *req,
696 uint64_t align)
697 {
698 int64_t overlap_offset = req->offset & ~(align - 1);
699 int64_t overlap_bytes =
700 ROUND_UP(req->offset + req->bytes, align) - overlap_offset;
701
702 bdrv_check_request(req->offset, req->bytes, &error_abort);
703
704 if (!req->serialising) {
705 qatomic_inc(&req->bs->serialising_in_flight);
706 req->serialising = true;
707 }
708
709 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
710 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
711 }
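
/*
 * Worked example (invented values): for req->offset == 1536,
 * req->bytes == 1024 and align == 4096, overlap_offset == 1536 & ~4095 == 0
 * and overlap_bytes == ROUND_UP(2560, 4096) == 4096, so the serialising
 * window widens to the whole aligned chunk [0, 4096).
 */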
712
713 /**
714 * Return the tracked request on @bs for the current coroutine, or
715 * NULL if there is none.
716 */
717 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
718 {
719 BdrvTrackedRequest *req;
720 Coroutine *self = qemu_coroutine_self();
721 IO_CODE();
722
723 QLIST_FOREACH(req, &bs->tracked_requests, list) {
724 if (req->co == self) {
725 return req;
726 }
727 }
728
729 return NULL;
730 }
731
732 /**
733 * Round a region to cluster boundaries
734 */
735 void coroutine_fn GRAPH_RDLOCK
736 bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
737 int64_t *cluster_offset, int64_t *cluster_bytes)
738 {
739 BlockDriverInfo bdi;
740 IO_CODE();
741 if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
742 *cluster_offset = offset;
743 *cluster_bytes = bytes;
744 } else {
745 int64_t c = bdi.cluster_size;
746 *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
747 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
748 }
749 }
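
/*
 * Worked example (invented values): with bdi.cluster_size == 64 KiB,
 * offset == 70000 and bytes == 1000 yield *cluster_offset == 65536 and
 * *cluster_bytes == 65536, i.e. the region is widened to the single
 * cluster that fully contains it.
 */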
750
751 static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
752 {
753 BlockDriverInfo bdi;
754 int ret;
755
756 ret = bdrv_co_get_info(bs, &bdi);
757 if (ret < 0 || bdi.cluster_size == 0) {
758 return bs->bl.request_alignment;
759 } else {
760 return bdi.cluster_size;
761 }
762 }
763
764 void bdrv_inc_in_flight(BlockDriverState *bs)
765 {
766 IO_CODE();
767 qatomic_inc(&bs->in_flight);
768 }
769
770 void bdrv_wakeup(BlockDriverState *bs)
771 {
772 IO_CODE();
773 aio_wait_kick();
774 }
775
776 void bdrv_dec_in_flight(BlockDriverState *bs)
777 {
778 IO_CODE();
779 qatomic_dec(&bs->in_flight);
780 bdrv_wakeup(bs);
781 }
782
783 static void coroutine_fn
784 bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
785 {
786 BlockDriverState *bs = self->bs;
787
788 if (!qatomic_read(&bs->serialising_in_flight)) {
789 return;
790 }
791
792 qemu_co_mutex_lock(&bs->reqs_lock);
793 bdrv_wait_serialising_requests_locked(self);
794 qemu_co_mutex_unlock(&bs->reqs_lock);
795 }
796
797 void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
798 uint64_t align)
799 {
800 IO_CODE();
801
802 qemu_co_mutex_lock(&req->bs->reqs_lock);
803
804 tracked_request_set_serialising(req, align);
805 bdrv_wait_serialising_requests_locked(req);
806
807 qemu_co_mutex_unlock(&req->bs->reqs_lock);
808 }
809
810 int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
811 QEMUIOVector *qiov, size_t qiov_offset,
812 Error **errp)
813 {
814 /*
815 * Check generic offset/bytes correctness
816 */
817
818 if (offset < 0) {
819 error_setg(errp, "offset is negative: %" PRIi64, offset);
820 return -EIO;
821 }
822
823 if (bytes < 0) {
824 error_setg(errp, "bytes is negative: %" PRIi64, bytes);
825 return -EIO;
826 }
827
828 if (bytes > BDRV_MAX_LENGTH) {
829 error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
830 bytes, BDRV_MAX_LENGTH);
831 return -EIO;
832 }
833
834 if (offset > BDRV_MAX_LENGTH) {
835 error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
836 offset, BDRV_MAX_LENGTH);
837 return -EIO;
838 }
839
840 if (offset > BDRV_MAX_LENGTH - bytes) {
841 error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
842 "exceeds maximum(%" PRIi64 ")", offset, bytes,
843 BDRV_MAX_LENGTH);
844 return -EIO;
845 }
846
847 if (!qiov) {
848 return 0;
849 }
850
851 /*
852 * Check qiov and qiov_offset
853 */
854
855 if (qiov_offset > qiov->size) {
856 error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
857 qiov_offset, qiov->size);
858 return -EIO;
859 }
860
861 if (bytes > qiov->size - qiov_offset) {
862 error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
863 "vector size(%zu)", bytes, qiov_offset, qiov->size);
864 return -EIO;
865 }
866
867 return 0;
868 }
869
870 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
871 {
872 return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
873 }
874
875 static int bdrv_check_request32(int64_t offset, int64_t bytes,
876 QEMUIOVector *qiov, size_t qiov_offset)
877 {
878 int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
879 if (ret < 0) {
880 return ret;
881 }
882
883 if (bytes > BDRV_REQUEST_MAX_BYTES) {
884 return -EIO;
885 }
886
887 return 0;
888 }
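
/*
 * Explanatory note: the extra cap compared to bdrv_check_qiov_request()
 * exists because parts of the request path (including the legacy
 * sector-based driver callbacks) still carry byte counts in an int, so
 * requests are additionally limited to BDRV_REQUEST_MAX_BYTES here.
 */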
889
890 /*
891 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
892 * The operation is sped up by checking the block status and only writing
893 * zeroes to the device if they currently do not return zeroes. Optional
894 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
895 * BDRV_REQ_FUA).
896 *
897 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
898 */
899 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
900 {
901 int ret;
902 int64_t target_size, bytes, offset = 0;
903 BlockDriverState *bs = child->bs;
904 IO_CODE();
905
906 target_size = bdrv_getlength(bs);
907 if (target_size < 0) {
908 return target_size;
909 }
910
911 for (;;) {
912 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
913 if (bytes <= 0) {
914 return 0;
915 }
916 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
917 if (ret < 0) {
918 return ret;
919 }
920 if (ret & BDRV_BLOCK_ZERO) {
921 offset += bytes;
922 continue;
923 }
924 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
925 if (ret < 0) {
926 return ret;
927 }
928 offset += bytes;
929 }
930 }
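
/*
 * Hypothetical caller (a sketch, not part of this file): zero a whole
 * device while allowing the driver to unmap rather than write zeroes
 * where possible:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */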
931
932 /*
933 * Writes to the file and ensures that no writes are reordered across this
934 * request (acts as a barrier)
935 *
936 * Returns 0 on success, -errno in error cases.
937 */
938 int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
939 int64_t bytes, const void *buf,
940 BdrvRequestFlags flags)
941 {
942 int ret;
943 IO_CODE();
944 assert_bdrv_graph_readable();
945
946 ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
947 if (ret < 0) {
948 return ret;
949 }
950
951 ret = bdrv_co_flush(child->bs);
952 if (ret < 0) {
953 return ret;
954 }
955
956 return 0;
957 }
958
959 typedef struct CoroutineIOCompletion {
960 Coroutine *coroutine;
961 int ret;
962 } CoroutineIOCompletion;
963
964 static void bdrv_co_io_em_complete(void *opaque, int ret)
965 {
966 CoroutineIOCompletion *co = opaque;
967
968 co->ret = ret;
969 aio_co_wake(co->coroutine);
970 }
971
972 static int coroutine_fn GRAPH_RDLOCK
973 bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
974 QEMUIOVector *qiov, size_t qiov_offset, int flags)
975 {
976 BlockDriver *drv = bs->drv;
977 int64_t sector_num;
978 unsigned int nb_sectors;
979 QEMUIOVector local_qiov;
980 int ret;
981 assert_bdrv_graph_readable();
982
983 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
984 assert(!(flags & ~bs->supported_read_flags));
985
986 if (!drv) {
987 return -ENOMEDIUM;
988 }
989
990 if (drv->bdrv_co_preadv_part) {
991 return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
992 flags);
993 }
994
995 if (qiov_offset > 0 || bytes != qiov->size) {
996 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
997 qiov = &local_qiov;
998 }
999
1000 if (drv->bdrv_co_preadv) {
1001 ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1002 goto out;
1003 }
1004
1005 if (drv->bdrv_aio_preadv) {
1006 BlockAIOCB *acb;
1007 CoroutineIOCompletion co = {
1008 .coroutine = qemu_coroutine_self(),
1009 };
1010
1011 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1012 bdrv_co_io_em_complete, &co);
1013 if (acb == NULL) {
1014 ret = -EIO;
1015 goto out;
1016 } else {
1017 qemu_coroutine_yield();
1018 ret = co.ret;
1019 goto out;
1020 }
1021 }
1022
1023 sector_num = offset >> BDRV_SECTOR_BITS;
1024 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1025
1026 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1027 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1028 assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1029 assert(drv->bdrv_co_readv);
1030
1031 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1032
1033 out:
1034 if (qiov == &local_qiov) {
1035 qemu_iovec_destroy(&local_qiov);
1036 }
1037
1038 return ret;
1039 }
1040
1041 static int coroutine_fn GRAPH_RDLOCK
1042 bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1043 QEMUIOVector *qiov, size_t qiov_offset,
1044 BdrvRequestFlags flags)
1045 {
1046 BlockDriver *drv = bs->drv;
1047 bool emulate_fua = false;
1048 int64_t sector_num;
1049 unsigned int nb_sectors;
1050 QEMUIOVector local_qiov;
1051 int ret;
1052 assert_bdrv_graph_readable();
1053
1054 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1055
1056 if (!drv) {
1057 return -ENOMEDIUM;
1058 }
1059
1060 if ((flags & BDRV_REQ_FUA) &&
1061 (~bs->supported_write_flags & BDRV_REQ_FUA)) {
1062 flags &= ~BDRV_REQ_FUA;
1063 emulate_fua = true;
1064 }
1065
1066 flags &= bs->supported_write_flags;
1067
1068 if (drv->bdrv_co_pwritev_part) {
1069 ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1070 flags);
1071 goto emulate_flags;
1072 }
1073
1074 if (qiov_offset > 0 || bytes != qiov->size) {
1075 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1076 qiov = &local_qiov;
1077 }
1078
1079 if (drv->bdrv_co_pwritev) {
1080 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
1081 goto emulate_flags;
1082 }
1083
1084 if (drv->bdrv_aio_pwritev) {
1085 BlockAIOCB *acb;
1086 CoroutineIOCompletion co = {
1087 .coroutine = qemu_coroutine_self(),
1088 };
1089
1090 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
1091 bdrv_co_io_em_complete, &co);
1092 if (acb == NULL) {
1093 ret = -EIO;
1094 } else {
1095 qemu_coroutine_yield();
1096 ret = co.ret;
1097 }
1098 goto emulate_flags;
1099 }
1100
1101 sector_num = offset >> BDRV_SECTOR_BITS;
1102 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1103
1104 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1105 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1106 assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1107
1108 assert(drv->bdrv_co_writev);
1109 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
1110
1111 emulate_flags:
1112 if (ret == 0 && emulate_fua) {
1113 ret = bdrv_co_flush(bs);
1114 }
1115
1116 if (qiov == &local_qiov) {
1117 qemu_iovec_destroy(&local_qiov);
1118 }
1119
1120 return ret;
1121 }
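
/*
 * Summary of the dispatch order implemented above: the byte-based
 * .bdrv_co_pwritev_part callback is preferred; failing that, the qiov is
 * sliced and .bdrv_co_pwritev, then the AIO-style .bdrv_aio_pwritev, and
 * finally the legacy sector-based .bdrv_co_writev are tried in turn. FUA
 * is emulated with an explicit flush whenever the driver does not support
 * BDRV_REQ_FUA natively; bdrv_driver_preadv() above mirrors the same
 * scheme for reads, minus the FUA handling.
 */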
1122
1123 static int coroutine_fn GRAPH_RDLOCK
1124 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
1125 int64_t bytes, QEMUIOVector *qiov,
1126 size_t qiov_offset)
1127 {
1128 BlockDriver *drv = bs->drv;
1129 QEMUIOVector local_qiov;
1130 int ret;
1131 assert_bdrv_graph_readable();
1132
1133 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1134
1135 if (!drv) {
1136 return -ENOMEDIUM;
1137 }
1138
1139 if (!block_driver_can_compress(drv)) {
1140 return -ENOTSUP;
1141 }
1142
1143 if (drv->bdrv_co_pwritev_compressed_part) {
1144 return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1145 qiov, qiov_offset);
1146 }
1147
1148 if (qiov_offset == 0) {
1149 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1150 }
1151
1152 qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1153 ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1154 qemu_iovec_destroy(&local_qiov);
1155
1156 return ret;
1157 }
1158
1159 static int coroutine_fn GRAPH_RDLOCK
1160 bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
1161 QEMUIOVector *qiov, size_t qiov_offset, int flags)
1162 {
1163 BlockDriverState *bs = child->bs;
1164
1165 /* Perform I/O through a temporary buffer so that users who scribble over
1166 * their read buffer while the operation is in progress do not end up
1167 * modifying the image file. This is critical for zero-copy guest I/O
1168 * where anything might happen inside guest memory.
1169 */
1170 void *bounce_buffer = NULL;
1171
1172 BlockDriver *drv = bs->drv;
1173 int64_t cluster_offset;
1174 int64_t cluster_bytes;
1175 int64_t skip_bytes;
1176 int ret;
1177 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1178 BDRV_REQUEST_MAX_BYTES);
1179 int64_t progress = 0;
1180 bool skip_write;
1181
1182 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1183
1184 if (!drv) {
1185 return -ENOMEDIUM;
1186 }
1187
1188 /*
1189 * Do not write anything when the BDS is inactive. That is not
1190 * allowed, and it would not help.
1191 */
1192 skip_write = (bs->open_flags & BDRV_O_INACTIVE);
1193
1194 /* FIXME We cannot require callers to have write permissions when all they
1195 * are doing is a read request. If we did things right, write permissions
1196 * would be obtained anyway, but internally by the copy-on-read code. As
1197 * long as it is implemented here rather than in a separate filter driver,
1198 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1199 * it could request permissions. Therefore we have to bypass the permission
1200 * system for the moment. */
1201 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1202
1203 /* Cover the entire cluster so no additional backing file I/O is required when
1204 * allocating a cluster in the image file. Note that this value may exceed
1205 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1206 * is one reason we loop rather than doing it all at once.
1207 */
1208 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1209 skip_bytes = offset - cluster_offset;
1210
1211 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1212 cluster_offset, cluster_bytes);
1213
1214 while (cluster_bytes) {
1215 int64_t pnum;
1216
1217 if (skip_write) {
1218 ret = 1; /* "already allocated", so nothing will be copied */
1219 pnum = MIN(cluster_bytes, max_transfer);
1220 } else {
1221 ret = bdrv_is_allocated(bs, cluster_offset,
1222 MIN(cluster_bytes, max_transfer), &pnum);
1223 if (ret < 0) {
1224 /*
1225 * Safe to treat errors in querying allocation as if
1226 * unallocated; we'll probably fail again soon on the
1227 * read, but at least that will set a decent errno.
1228 */
1229 pnum = MIN(cluster_bytes, max_transfer);
1230 }
1231
1232 /* Stop at EOF if the image ends in the middle of the cluster */
1233 if (ret == 0 && pnum == 0) {
1234 assert(progress >= bytes);
1235 break;
1236 }
1237
1238 assert(skip_bytes < pnum);
1239 }
1240
1241 if (ret <= 0) {
1242 QEMUIOVector local_qiov;
1243
1244 /* Must copy-on-read; use the bounce buffer */
1245 pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1246 if (!bounce_buffer) {
1247 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
1248 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
1249 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
1250
1251 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
1252 if (!bounce_buffer) {
1253 ret = -ENOMEM;
1254 goto err;
1255 }
1256 }
1257 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1258
1259 ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1260 &local_qiov, 0, 0);
1261 if (ret < 0) {
1262 goto err;
1263 }
1264
1265 bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
1266 if (drv->bdrv_co_pwrite_zeroes &&
1267 buffer_is_zero(bounce_buffer, pnum)) {
1268 /* FIXME: Should we (perhaps conditionally) be setting
1269 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1270 * that still correctly reads as zero? */
1271 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1272 BDRV_REQ_WRITE_UNCHANGED);
1273 } else {
1274 /* This does not change the data on the disk, it is not
1275 * necessary to flush even in cache=writethrough mode.
1276 */
1277 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1278 &local_qiov, 0,
1279 BDRV_REQ_WRITE_UNCHANGED);
1280 }
1281
1282 if (ret < 0) {
1283 /* It might be okay to ignore write errors for guest
1284 * requests. If this is a deliberate copy-on-read
1285 * then we don't want to ignore the error. Simply
1286 * report it in all cases.
1287 */
1288 goto err;
1289 }
1290
1291 if (!(flags & BDRV_REQ_PREFETCH)) {
1292 qemu_iovec_from_buf(qiov, qiov_offset + progress,
1293 bounce_buffer + skip_bytes,
1294 MIN(pnum - skip_bytes, bytes - progress));
1295 }
1296 } else if (!(flags & BDRV_REQ_PREFETCH)) {
1297 /* Read directly into the destination */
1298 ret = bdrv_driver_preadv(bs, offset + progress,
1299 MIN(pnum - skip_bytes, bytes - progress),
1300 qiov, qiov_offset + progress, 0);
1301 if (ret < 0) {
1302 goto err;
1303 }
1304 }
1305
1306 cluster_offset += pnum;
1307 cluster_bytes -= pnum;
1308 progress += pnum - skip_bytes;
1309 skip_bytes = 0;
1310 }
1311 ret = 0;
1312
1313 err:
1314 qemu_vfree(bounce_buffer);
1315 return ret;
1316 }
1317
1318 /*
1319 * Forwards an already correctly aligned request to the BlockDriver. This
1320 * handles copy on read, zeroing after EOF, and fragmentation of large
1321 * reads; any other features must be implemented by the caller.
1322 */
1323 static int coroutine_fn GRAPH_RDLOCK
1324 bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
1325 int64_t offset, int64_t bytes, int64_t align,
1326 QEMUIOVector *qiov, size_t qiov_offset, int flags)
1327 {
1328 BlockDriverState *bs = child->bs;
1329 int64_t total_bytes, max_bytes;
1330 int ret = 0;
1331 int64_t bytes_remaining = bytes;
1332 int max_transfer;
1333
1334 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1335 assert(is_power_of_2(align));
1336 assert((offset & (align - 1)) == 0);
1337 assert((bytes & (align - 1)) == 0);
1338 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1339 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1340 align);
1341
1342 /*
1343 * TODO: We would need a per-BDS .supported_read_flags and
1344 * potential fallback support, if we ever implement any read flags
1345 * to pass through to drivers. For now, there aren't any
1346 * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1347 */
1348 assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
1349 BDRV_REQ_REGISTERED_BUF)));
1350
1351 /* Handle Copy on Read and associated serialisation */
1352 if (flags & BDRV_REQ_COPY_ON_READ) {
1353 /* If we touch the same cluster it counts as an overlap. This
1354 * guarantees that allocating writes will be serialized and not race
1355 * with each other for the same cluster. For example, in copy-on-read
1356 * it ensures that the CoR read and write operations are atomic and
1357 * guest writes cannot interleave between them. */
1358 bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
1359 } else {
1360 bdrv_wait_serialising_requests(req);
1361 }
1362
1363 if (flags & BDRV_REQ_COPY_ON_READ) {
1364 int64_t pnum;
1365
1366 /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1367 flags &= ~BDRV_REQ_COPY_ON_READ;
1368
1369 ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1370 if (ret < 0) {
1371 goto out;
1372 }
1373
1374 if (!ret || pnum != bytes) {
1375 ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
1376 qiov, qiov_offset, flags);
1377 goto out;
1378 } else if (flags & BDRV_REQ_PREFETCH) {
1379 goto out;
1380 }
1381 }
1382
1383 /* Forward the request to the BlockDriver, possibly fragmenting it */
1384 total_bytes = bdrv_getlength(bs);
1385 if (total_bytes < 0) {
1386 ret = total_bytes;
1387 goto out;
1388 }
1389
1390 assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
1391
1392 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1393 if (bytes <= max_bytes && bytes <= max_transfer) {
1394 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
1395 goto out;
1396 }
1397
1398 while (bytes_remaining) {
1399 int64_t num;
1400
1401 if (max_bytes) {
1402 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1403 assert(num);
1404
1405 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1406 num, qiov,
1407 qiov_offset + bytes - bytes_remaining,
1408 flags);
1409 max_bytes -= num;
1410 } else {
1411 num = bytes_remaining;
1412 ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1413 0, bytes_remaining);
1414 }
1415 if (ret < 0) {
1416 goto out;
1417 }
1418 bytes_remaining -= num;
1419 }
1420
1421 out:
1422 return ret < 0 ? ret : 0;
1423 }
1424
1425 /*
1426 * Request padding
1427 *
1428 * |<---- align ----->| |<----- align ---->|
1429 * |<- head ->|<------------- bytes ------------->|<-- tail -->|
1430 * | | | | | |
1431 * -*----------$-------*-------- ... --------*-----$------------*---
1432 * | | | | | |
1433 * | offset | | end |
1434 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end)
1435 * [buf ... ) [tail_buf )
1436 *
1437 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
1438 * is placed at the beginning of @buf and @tail at the end of it.
1439 *
1440 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
1441 * around tail, if tail exists.
1442 *
1443 * @merge_reads is true for small requests, i.e. when
1444 * @buf_len == @head + bytes + @tail. In this case it is possible that both
1445 * head and tail exist but @buf_len == align and @tail_buf == @buf.
1446 */
1447 typedef struct BdrvRequestPadding {
1448 uint8_t *buf;
1449 size_t buf_len;
1450 uint8_t *tail_buf;
1451 size_t head;
1452 size_t tail;
1453 bool merge_reads;
1454 QEMUIOVector local_qiov;
1455 } BdrvRequestPadding;
1456
1457 static bool bdrv_init_padding(BlockDriverState *bs,
1458 int64_t offset, int64_t bytes,
1459 BdrvRequestPadding *pad)
1460 {
1461 int64_t align = bs->bl.request_alignment;
1462 int64_t sum;
1463
1464 bdrv_check_request(offset, bytes, &error_abort);
1465 assert(align <= INT_MAX); /* documented in block/block_int.h */
1466 assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */
1467
1468 memset(pad, 0, sizeof(*pad));
1469
1470 pad->head = offset & (align - 1);
1471 pad->tail = ((offset + bytes) & (align - 1));
1472 if (pad->tail) {
1473 pad->tail = align - pad->tail;
1474 }
1475
1476 if (!pad->head && !pad->tail) {
1477 return false;
1478 }
1479
1480 assert(bytes); /* Nothing good in aligning zero-length requests */
1481
1482 sum = pad->head + bytes + pad->tail;
1483 pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
1484 pad->buf = qemu_blockalign(bs, pad->buf_len);
1485 pad->merge_reads = sum == pad->buf_len;
1486 if (pad->tail) {
1487 pad->tail_buf = pad->buf + pad->buf_len - align;
1488 }
1489
1490 return true;
1491 }
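
/*
 * Worked example (invented values): align == 512, offset == 300,
 * bytes == 1000. Then head == 300 and tail == 512 - (1300 % 512) == 236;
 * sum == 1536 exceeds align with both paddings present, so buf_len == 1024
 * and merge_reads == false. With offset == 100, bytes == 200 the whole
 * request fits into one aligned chunk: sum == 512 == buf_len, so
 * merge_reads == true and tail_buf == buf.
 */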
1492
1493 static int coroutine_fn GRAPH_RDLOCK
1494 bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
1495 BdrvRequestPadding *pad, bool zero_middle)
1496 {
1497 QEMUIOVector local_qiov;
1498 BlockDriverState *bs = child->bs;
1499 uint64_t align = bs->bl.request_alignment;
1500 int ret;
1501
1502 assert(req->serialising && pad->buf);
1503
1504 if (pad->head || pad->merge_reads) {
1505 int64_t bytes = pad->merge_reads ? pad->buf_len : align;
1506
1507 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
1508
1509 if (pad->head) {
1510 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1511 }
1512 if (pad->merge_reads && pad->tail) {
1513 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1514 }
1515 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
1516 align, &local_qiov, 0, 0);
1517 if (ret < 0) {
1518 return ret;
1519 }
1520 if (pad->head) {
1521 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1522 }
1523 if (pad->merge_reads && pad->tail) {
1524 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1525 }
1526
1527 if (pad->merge_reads) {
1528 goto zero_mem;
1529 }
1530 }
1531
1532 if (pad->tail) {
1533 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
1534
1535 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1536 ret = bdrv_aligned_preadv(
1537 child, req,
1538 req->overlap_offset + req->overlap_bytes - align,
1539 align, align, &local_qiov, 0, 0);
1540 if (ret < 0) {
1541 return ret;
1542 }
1543 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1544 }
1545
1546 zero_mem:
1547 if (zero_middle) {
1548 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
1549 }
1550
1551 return 0;
1552 }
1553
1554 static void bdrv_padding_destroy(BdrvRequestPadding *pad)
1555 {
1556 if (pad->buf) {
1557 qemu_vfree(pad->buf);
1558 qemu_iovec_destroy(&pad->local_qiov);
1559 }
1560 memset(pad, 0, sizeof(*pad));
1561 }
1562
1563 /*
1564 * bdrv_pad_request
1565 *
1566 * Exchange request parameters with padded request if needed. Don't include RMW
1567 * read of padding; bdrv_padding_rmw_read() should be called separately if
1568 * needed.
1569 *
1570 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
1571 * - on function start they represent original request
1572 * - on failure or when padding is not needed they are unchanged
1573 * - on success when padding is needed they represent padded request
1574 */
1575 static int bdrv_pad_request(BlockDriverState *bs,
1576 QEMUIOVector **qiov, size_t *qiov_offset,
1577 int64_t *offset, int64_t *bytes,
1578 BdrvRequestPadding *pad, bool *padded,
1579 BdrvRequestFlags *flags)
1580 {
1581 int ret;
1582
1583 bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);
1584
1585 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
1586 if (padded) {
1587 *padded = false;
1588 }
1589 return 0;
1590 }
1591
1592 ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
1593 *qiov, *qiov_offset, *bytes,
1594 pad->buf + pad->buf_len - pad->tail,
1595 pad->tail);
1596 if (ret < 0) {
1597 bdrv_padding_destroy(pad);
1598 return ret;
1599 }
1600 *bytes += pad->head + pad->tail;
1601 *offset -= pad->head;
1602 *qiov = &pad->local_qiov;
1603 *qiov_offset = 0;
1604 if (padded) {
1605 *padded = true;
1606 }
1607 if (flags) {
1608 /* Can't use optimization hint with bounce buffer */
1609 *flags &= ~BDRV_REQ_REGISTERED_BUF;
1610 }
1611
1612 return 0;
1613 }
1614
1615 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1616 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1617 BdrvRequestFlags flags)
1618 {
1619 IO_CODE();
1620 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1621 }
1622
1623 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1624 int64_t offset, int64_t bytes,
1625 QEMUIOVector *qiov, size_t qiov_offset,
1626 BdrvRequestFlags flags)
1627 {
1628 BlockDriverState *bs = child->bs;
1629 BdrvTrackedRequest req;
1630 BdrvRequestPadding pad;
1631 int ret;
1632 IO_CODE();
1633
1634 trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
1635
1636 if (!bdrv_co_is_inserted(bs)) {
1637 return -ENOMEDIUM;
1638 }
1639
1640 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1641 if (ret < 0) {
1642 return ret;
1643 }
1644
1645 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1646 /*
1647 * Aligning a zero-length request is pointless. Even if a driver assigns
1648 * special meaning to zero-length requests (like qcow2_co_pwritev_compressed_part),
1649 * we can't pass such a request to the driver because of request_alignment.
1650 *
1651 * Still, there is no reason to return an error if someone happens to issue
1652 * an unaligned zero-length read occasionally.
1653 */
1654 return 0;
1655 }
1656
1657 bdrv_inc_in_flight(bs);
1658
1659 /* Don't do copy-on-read if we read data before write operation */
1660 if (qatomic_read(&bs->copy_on_read)) {
1661 flags |= BDRV_REQ_COPY_ON_READ;
1662 }
1663
1664 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
1665 NULL, &flags);
1666 if (ret < 0) {
1667 goto fail;
1668 }
1669
1670 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1671 ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1672 bs->bl.request_alignment,
1673 qiov, qiov_offset, flags);
1674 tracked_request_end(&req);
1675 bdrv_padding_destroy(&pad);
1676
1677 fail:
1678 bdrv_dec_in_flight(bs);
1679
1680 return ret;
1681 }
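
/*
 * Read path overview (informal): bdrv_co_preadv_part() pads the request to
 * bs->bl.request_alignment via bdrv_pad_request(), tracks it so that
 * serialising writers can wait for it, and delegates to
 * bdrv_aligned_preadv(), which handles copy-on-read, zeroing past EOF and
 * fragmentation before calling into the driver.
 */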
1682
1683 static int coroutine_fn GRAPH_RDLOCK
1684 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1685 BdrvRequestFlags flags)
1686 {
1687 BlockDriver *drv = bs->drv;
1688 QEMUIOVector qiov;
1689 void *buf = NULL;
1690 int ret = 0;
1691 bool need_flush = false;
1692 int head = 0;
1693 int tail = 0;
1694
1695 int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
1696 INT64_MAX);
1697 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1698 bs->bl.request_alignment);
1699 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1700
1701 assert_bdrv_graph_readable();
1702 bdrv_check_request(offset, bytes, &error_abort);
1703
1704 if (!drv) {
1705 return -ENOMEDIUM;
1706 }
1707
1708 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1709 return -ENOTSUP;
1710 }
1711
1712 /* By definition there is no user buffer so this flag doesn't make sense */
1713 if (flags & BDRV_REQ_REGISTERED_BUF) {
1714 return -EINVAL;
1715 }
1716
1717 /* Invalidate the cached block-status data range if this write overlaps */
1718 bdrv_bsc_invalidate_range(bs, offset, bytes);
1719
1720 assert(alignment % bs->bl.request_alignment == 0);
1721 head = offset % alignment;
1722 tail = (offset + bytes) % alignment;
1723 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1724 assert(max_write_zeroes >= bs->bl.request_alignment);
1725
1726 while (bytes > 0 && !ret) {
1727 int64_t num = bytes;
1728
1729 /* Align request. Block drivers can expect the "bulk" of the request
1730 * to be aligned, and that unaligned requests do not cross cluster
1731 * boundaries.
1732 */
1733 if (head) {
1734 /* Make a small request up to the first aligned sector. For
1735 * convenience, limit this request to max_transfer even if
1736 * we don't need to fall back to writes. */
1737 num = MIN(MIN(bytes, max_transfer), alignment - head);
1738 head = (head + num) % alignment;
1739 assert(num < max_write_zeroes);
1740 } else if (tail && num > alignment) {
1741 /* Shorten the request to the last aligned sector. */
1742 num -= tail;
1743 }
1744
1745 /* limit request size */
1746 if (num > max_write_zeroes) {
1747 num = max_write_zeroes;
1748 }
1749
1750 ret = -ENOTSUP;
1751 /* First try the efficient write zeroes operation */
1752 if (drv->bdrv_co_pwrite_zeroes) {
1753 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1754 flags & bs->supported_zero_flags);
1755 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1756 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1757 need_flush = true;
1758 }
1759 } else {
1760 assert(!bs->supported_zero_flags);
1761 }
1762
1763 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1764 /* Fall back to bounce buffer if write zeroes is unsupported */
1765 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1766
1767 if ((flags & BDRV_REQ_FUA) &&
1768 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1769 /* No need for bdrv_driver_pwritev() to do a fallback
1770 * flush on each chunk; use just one at the end */
1771 write_flags &= ~BDRV_REQ_FUA;
1772 need_flush = true;
1773 }
1774 num = MIN(num, max_transfer);
1775 if (buf == NULL) {
1776 buf = qemu_try_blockalign0(bs, num);
1777 if (buf == NULL) {
1778 ret = -ENOMEM;
1779 goto fail;
1780 }
1781 }
1782 qemu_iovec_init_buf(&qiov, buf, num);
1783
1784 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1785
1786 /* Keep the bounce buffer around if it is big enough for
1787 * all future requests.
1788 */
1789 if (num < max_transfer) {
1790 qemu_vfree(buf);
1791 buf = NULL;
1792 }
1793 }
1794
1795 offset += num;
1796 bytes -= num;
1797 }
1798
1799 fail:
1800 if (ret == 0 && need_flush) {
1801 ret = bdrv_co_flush(bs);
1802 }
1803 qemu_vfree(buf);
1804 return ret;
1805 }
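
/*
 * Worked example of the alignment loop above (invented values, assuming
 * max_write_zeroes and max_transfer are large enough not to fragment the
 * request further): with alignment == 4096, offset == 2048 and
 * bytes == 12288, the first iteration writes 2048 bytes up to the aligned
 * boundary, the second writes the aligned 8192-byte middle, and the third
 * writes the 2048-byte unaligned tail.
 */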
1806
1807 static inline int coroutine_fn GRAPH_RDLOCK
1808 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1809 BdrvTrackedRequest *req, int flags)
1810 {
1811 BlockDriverState *bs = child->bs;
1812
1813 bdrv_check_request(offset, bytes, &error_abort);
1814
1815 if (bdrv_is_read_only(bs)) {
1816 return -EPERM;
1817 }
1818
1819 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1820 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1821 assert(!(flags & ~BDRV_REQ_MASK));
1822 assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1823
1824 if (flags & BDRV_REQ_SERIALISING) {
1825 QEMU_LOCK_GUARD(&bs->reqs_lock);
1826
1827 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1828
1829 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1830 return -EBUSY;
1831 }
1832
1833 bdrv_wait_serialising_requests_locked(req);
1834 } else {
1835 bdrv_wait_serialising_requests(req);
1836 }
1837
1838 assert(req->overlap_offset <= offset);
1839 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1840 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
1841 child->perm & BLK_PERM_RESIZE);
1842
1843 switch (req->type) {
1844 case BDRV_TRACKED_WRITE:
1845 case BDRV_TRACKED_DISCARD:
1846 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1847 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1848 } else {
1849 assert(child->perm & BLK_PERM_WRITE);
1850 }
1851 bdrv_write_threshold_check_write(bs, offset, bytes);
1852 return 0;
1853 case BDRV_TRACKED_TRUNCATE:
1854 assert(child->perm & BLK_PERM_RESIZE);
1855 return 0;
1856 default:
1857 abort();
1858 }
1859 }
1860
1861 static inline void coroutine_fn
1862 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
1863 BdrvTrackedRequest *req, int ret)
1864 {
1865 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1866 BlockDriverState *bs = child->bs;
1867
1868 bdrv_check_request(offset, bytes, &error_abort);
1869
1870 qatomic_inc(&bs->write_gen);
1871
1872 /*
1873 * Discard cannot extend the image, but in error handling cases, such as
1874 * when reverting a qcow2 cluster allocation, the discarded range can extend
1875 * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
1876 * here. Instead, just skip it, since semantically a discard request
1877 * beyond EOF cannot expand the image anyway.
1878 */
1879 if (ret == 0 &&
1880 (req->type == BDRV_TRACKED_TRUNCATE ||
1881 end_sector > bs->total_sectors) &&
1882 req->type != BDRV_TRACKED_DISCARD) {
1883 bs->total_sectors = end_sector;
1884 bdrv_parent_cb_resize(bs);
1885 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1886 }
1887 if (req->bytes) {
1888 switch (req->type) {
1889 case BDRV_TRACKED_WRITE:
1890 stat64_max(&bs->wr_highest_offset, offset + bytes);
1891 /* fall through, to set dirty bits */
1892 case BDRV_TRACKED_DISCARD:
1893 bdrv_set_dirty(bs, offset, bytes);
1894 break;
1895 default:
1896 break;
1897 }
1898 }
1899 }
1900
1901 /*
1902 * Forwards an already correctly aligned write request to the BlockDriver,
1903 * after possibly fragmenting it.
1904 */
1905 static int coroutine_fn GRAPH_RDLOCK
1906 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
1907 int64_t offset, int64_t bytes, int64_t align,
1908 QEMUIOVector *qiov, size_t qiov_offset,
1909 BdrvRequestFlags flags)
1910 {
1911 BlockDriverState *bs = child->bs;
1912 BlockDriver *drv = bs->drv;
1913 int ret;
1914
1915 int64_t bytes_remaining = bytes;
1916 int max_transfer;
1917
1918 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1919
1920 if (!drv) {
1921 return -ENOMEDIUM;
1922 }
1923
1924 if (bdrv_has_readonly_bitmaps(bs)) {
1925 return -EPERM;
1926 }
1927
1928 assert(is_power_of_2(align));
1929 assert((offset & (align - 1)) == 0);
1930 assert((bytes & (align - 1)) == 0);
1931 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1932 align);
1933
1934 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1935
1936 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1937 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1938 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
1939 flags |= BDRV_REQ_ZERO_WRITE;
1940 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1941 flags |= BDRV_REQ_MAY_UNMAP;
1942 }
1943
1944 /* Can't use optimization hint with bufferless zero write */
1945 flags &= ~BDRV_REQ_REGISTERED_BUF;
1946 }
1947
1948 if (ret < 0) {
1949 /* Do nothing, write notifier decided to fail this request */
1950 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1951 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1952 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1953 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1954 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
1955 qiov, qiov_offset);
1956 } else if (bytes <= max_transfer) {
1957 bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
1958 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
1959 } else {
1960 bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
1961 while (bytes_remaining) {
1962 int num = MIN(bytes_remaining, max_transfer);
1963 int local_flags = flags;
1964
1965 assert(num);
1966 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1967 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1968 /* If FUA is going to be emulated by flush, we only
1969 * need to flush on the last iteration */
1970 local_flags &= ~BDRV_REQ_FUA;
1971 }
1972
1973 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1974 num, qiov,
1975 qiov_offset + bytes - bytes_remaining,
1976 local_flags);
1977 if (ret < 0) {
1978 break;
1979 }
1980 bytes_remaining -= num;
1981 }
1982 }
1983 bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
1984
1985 if (ret >= 0) {
1986 ret = 0;
1987 }
1988 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
1989
1990 return ret;
1991 }
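
/*
 * Editor's worked example (illustrative numbers, not from the original
 * source) for the fragmentation loop above: with bytes = 5 MiB and
 * max_transfer = 2 MiB, the loop issues 2 MiB at offset, 2 MiB at
 * offset + 2 MiB, and 1 MiB at offset + 4 MiB. If BDRV_REQ_FUA has to be
 * emulated by a flush, the flag is dropped from the first two chunks and
 * kept only on the final one, so a single flush covers the whole request.
 */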
1992
1993 static int coroutine_fn GRAPH_RDLOCK
1994 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
1995 BdrvRequestFlags flags, BdrvTrackedRequest *req)
1996 {
1997 BlockDriverState *bs = child->bs;
1998 QEMUIOVector local_qiov;
1999 uint64_t align = bs->bl.request_alignment;
2000 int ret = 0;
2001 bool padding;
2002 BdrvRequestPadding pad;
2003
2004 /* This flag doesn't make sense for padding or zero writes */
2005 flags &= ~BDRV_REQ_REGISTERED_BUF;
2006
2007 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2008 if (padding) {
2009 assert(!(flags & BDRV_REQ_NO_WAIT));
2010 bdrv_make_request_serialising(req, align);
2011
2012 bdrv_padding_rmw_read(child, req, &pad, true);
2013
2014 if (pad.head || pad.merge_reads) {
2015 int64_t aligned_offset = offset & ~(align - 1);
2016 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2017
2018 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2019 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2020 align, &local_qiov, 0,
2021 flags & ~BDRV_REQ_ZERO_WRITE);
2022 if (ret < 0 || pad.merge_reads) {
2023 /* Error or all work is done */
2024 goto out;
2025 }
2026 offset += write_bytes - pad.head;
2027 bytes -= write_bytes - pad.head;
2028 }
2029 }
2030
2031 assert(!bytes || (offset & (align - 1)) == 0);
2032 if (bytes >= align) {
2033 /* Write the aligned part in the middle. */
2034 int64_t aligned_bytes = bytes & ~(align - 1);
2035 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2036 NULL, 0, flags);
2037 if (ret < 0) {
2038 goto out;
2039 }
2040 bytes -= aligned_bytes;
2041 offset += aligned_bytes;
2042 }
2043
2044 assert(!bytes || (offset & (align - 1)) == 0);
2045 if (bytes) {
2046 assert(align == pad.tail + bytes);
2047
2048 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2049 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2050 &local_qiov, 0,
2051 flags & ~BDRV_REQ_ZERO_WRITE);
2052 }
2053
2054 out:
2055 bdrv_padding_destroy(&pad);
2056
2057 return ret;
2058 }
2059
2060 /*
2061 * Handle a write request in coroutine context
2062 */
2063 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2064 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2065 BdrvRequestFlags flags)
2066 {
2067 IO_CODE();
2068 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2069 }
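
/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * original file): writing one aligned, zero-filled buffer through
 * bdrv_co_pwritev(). It only uses APIs visible in this file; the caller
 * is assumed to run in a coroutine and to hold whatever locks
 * bdrv_co_pwritev() requires.
 */
static int coroutine_fn example_write_one_block(BdrvChild *child, int64_t offset)
{
    size_t len = child->bs->bl.request_alignment;
    void *buf = qemu_blockalign0(child->bs, len); /* aligned and zeroed */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
    int ret = bdrv_co_pwritev(child, offset, len, &qiov, 0);

    qemu_vfree(buf);
    return ret;
}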
2070
2071 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2072 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2073 BdrvRequestFlags flags)
2074 {
2075 BlockDriverState *bs = child->bs;
2076 BdrvTrackedRequest req;
2077 uint64_t align = bs->bl.request_alignment;
2078 BdrvRequestPadding pad;
2079 int ret;
2080 bool padded = false;
2081 IO_CODE();
2082
2083 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2084
2085 if (!bdrv_co_is_inserted(bs)) {
2086 return -ENOMEDIUM;
2087 }
2088
2089 if (flags & BDRV_REQ_ZERO_WRITE) {
2090 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2091 } else {
2092 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2093 }
2094 if (ret < 0) {
2095 return ret;
2096 }
2097
2098 /* If the request is misaligned then we can't make it efficient */
2099 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2100 !QEMU_IS_ALIGNED(offset | bytes, align))
2101 {
2102 return -ENOTSUP;
2103 }
2104
2105 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2106 /*
2107 * Padding a zero-length request makes no sense. Even if the driver gives
2108 * zero-length requests special meaning (like qcow2_co_pwritev_compressed_part),
2109 * we can't pass such a request to the driver because of request_alignment.
2110 *
2111 * Still, no reason to return an error if someone occasionally does an
2112 * unaligned zero-length write.
2113 */
2114 return 0;
2115 }
2116
2117 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2118 /*
2119 * Pad the request for the following read-modify-write cycle.
2120 * bdrv_co_do_zero_pwritev() handles alignment by itself, so we pad
2121 * here only if the ZERO flag is absent.
2122 */
2123 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
2124 &padded, &flags);
2125 if (ret < 0) {
2126 return ret;
2127 }
2128 }
2129
2130 bdrv_inc_in_flight(bs);
2131 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2132
2133 if (flags & BDRV_REQ_ZERO_WRITE) {
2134 assert(!padded);
2135 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2136 goto out;
2137 }
2138
2139 if (padded) {
2140 /*
2141 * Request was unaligned to request_alignment and therefore
2142 * padded. We are going to do read-modify-write, and must
2143 * serialize the request to prevent interactions of the
2144 * widened region with other transactions.
2145 */
2146 assert(!(flags & BDRV_REQ_NO_WAIT));
2147 bdrv_make_request_serialising(&req, align);
2148 bdrv_padding_rmw_read(child, &req, &pad, false);
2149 }
2150
2151 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2152 qiov, qiov_offset, flags);
2153
2154 bdrv_padding_destroy(&pad);
2155
2156 out:
2157 tracked_request_end(&req);
2158 bdrv_dec_in_flight(bs);
2159
2160 return ret;
2161 }
2162
2163 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2164 int64_t bytes, BdrvRequestFlags flags)
2165 {
2166 IO_CODE();
2167 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2168 assert_bdrv_graph_readable();
2169
2170 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2171 flags &= ~BDRV_REQ_MAY_UNMAP;
2172 }
2173
2174 return bdrv_co_pwritev(child, offset, bytes, NULL,
2175 BDRV_REQ_ZERO_WRITE | flags);
2176 }
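
/*
 * Illustrative sketch (hypothetical helper): zeroing a region while
 * allowing the driver to deallocate it. As the code above shows,
 * BDRV_REQ_MAY_UNMAP is silently dropped when the node was opened
 * without BDRV_O_UNMAP.
 */
static int coroutine_fn example_zero_region(BdrvChild *child,
                                            int64_t offset, int64_t bytes)
{
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}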
2177
2178 /*
2179 * Flush ALL BDSes, regardless of whether they are reachable via a BlockBackend.
2180 */
2181 int bdrv_flush_all(void)
2182 {
2183 BdrvNextIterator it;
2184 BlockDriverState *bs = NULL;
2185 int result = 0;
2186
2187 GLOBAL_STATE_CODE();
2188
2189 /*
2190 * The bdrv queue is managed by record/replay;
2191 * creating a new flush request for stopping
2192 * the VM may break determinism.
2193 */
2194 if (replay_events_enabled()) {
2195 return result;
2196 }
2197
2198 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2199 AioContext *aio_context = bdrv_get_aio_context(bs);
2200 int ret;
2201
2202 aio_context_acquire(aio_context);
2203 ret = bdrv_flush(bs);
2204 if (ret < 0 && !result) {
2205 result = ret;
2206 }
2207 aio_context_release(aio_context);
2208 }
2209
2210 return result;
2211 }
2212
2213 /*
2214 * Returns the allocation status of the specified byte range.
2215 * Drivers not implementing the functionality are assumed to not support
2216 * backing files, hence all their sectors are reported as allocated.
2217 *
2218 * If 'want_zero' is true, the caller is querying for mapping
2219 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2220 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2221 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2222 *
2223 * If 'offset' is beyond the end of the disk image the return value is
2224 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2225 *
2226 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2227 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2228 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2229 *
2230 * 'pnum' is set to the number of bytes (including and immediately
2231 * following the specified offset) that are easily known to be in the
2232 * same allocated/unallocated state. Note that a second call starting
2233 * at the original offset plus returned pnum may have the same status.
2234 * The returned value is non-zero on success except at end-of-file.
2235 *
2236 * Returns negative errno on failure. Otherwise, if the
2237 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2238 * set to the host mapping and BDS corresponding to the guest offset.
2239 */
2240 static int coroutine_fn GRAPH_RDLOCK
2241 bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
2242 int64_t offset, int64_t bytes,
2243 int64_t *pnum, int64_t *map, BlockDriverState **file)
2244 {
2245 int64_t total_size;
2246 int64_t n; /* bytes */
2247 int ret;
2248 int64_t local_map = 0;
2249 BlockDriverState *local_file = NULL;
2250 int64_t aligned_offset, aligned_bytes;
2251 uint32_t align;
2252 bool has_filtered_child;
2253
2254 assert(pnum);
2255 assert_bdrv_graph_readable();
2256 *pnum = 0;
2257 total_size = bdrv_getlength(bs);
2258 if (total_size < 0) {
2259 ret = total_size;
2260 goto early_out;
2261 }
2262
2263 if (offset >= total_size) {
2264 ret = BDRV_BLOCK_EOF;
2265 goto early_out;
2266 }
2267 if (!bytes) {
2268 ret = 0;
2269 goto early_out;
2270 }
2271
2272 n = total_size - offset;
2273 if (n < bytes) {
2274 bytes = n;
2275 }
2276
2277 /* Must be non-NULL or bdrv_getlength() would have failed */
2278 assert(bs->drv);
2279 has_filtered_child = bdrv_filter_child(bs);
2280 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2281 *pnum = bytes;
2282 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2283 if (offset + bytes == total_size) {
2284 ret |= BDRV_BLOCK_EOF;
2285 }
2286 if (bs->drv->protocol_name) {
2287 ret |= BDRV_BLOCK_OFFSET_VALID;
2288 local_map = offset;
2289 local_file = bs;
2290 }
2291 goto early_out;
2292 }
2293
2294 bdrv_inc_in_flight(bs);
2295
2296 /* Round out to request_alignment boundaries */
2297 align = bs->bl.request_alignment;
2298 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2299 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2300
2301 if (bs->drv->bdrv_co_block_status) {
2302 /*
2303 * Use the block-status cache only for protocol nodes: Format
2304 * drivers are generally quick to query the status, but protocol
2305 * drivers often need to get information from outside of qemu, so
2306 * we do not have control over the actual implementation. There
2307 * have been cases where querying the status took an unreasonably
2308 * long time, and we can do nothing in qemu to fix it.
2309 * This is especially problematic for images with large data areas,
2310 * because finding the few holes in them and giving them special
2311 * treatment does not gain much performance. Therefore, we try to
2312 * cache the last-identified data region.
2313 *
2314 * Second, limiting ourselves to protocol nodes allows us to assume
2315 * the block status for data regions to be DATA | OFFSET_VALID, and
2316 * that the host offset is the same as the guest offset.
2317 *
2318 * Note that it is possible that external writers zero parts of
2319 * the cached regions without the cache being invalidated, and so
2320 * we may report zeroes as data. This is not catastrophic,
2321 * however, because reporting zeroes as data is fine.
2322 */
2323 if (QLIST_EMPTY(&bs->children) &&
2324 bdrv_bsc_is_data(bs, aligned_offset, pnum))
2325 {
2326 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2327 local_file = bs;
2328 local_map = aligned_offset;
2329 } else {
2330 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2331 aligned_bytes, pnum, &local_map,
2332 &local_file);
2333
2334 /*
2335 * Note that checking QLIST_EMPTY(&bs->children) is also done when
2336 * the cache is queried above. Technically, we do not need to check
2337 * it here; the worst that can happen is that we fill the cache for
2338 * non-protocol nodes, and then it is never used. However, filling
2339 * the cache requires an RCU update, so double check here to avoid
2340 * such an update if possible.
2341 *
2342 * Check want_zero, because we only want to update the cache when we
2343 * have accurate information about what is zero and what is data.
2344 */
2345 if (want_zero &&
2346 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2347 QLIST_EMPTY(&bs->children))
2348 {
2349 /*
2350 * When a protocol driver reports BLOCK_OFFSET_VALID, the
2351 * returned local_map value must be the same as the offset we
2352 * have passed (aligned_offset), and local_file must be the node
2353 * itself.
2354 * Assert this, because we follow this rule when reading from
2355 * the cache (see the `local_file = bs` and
2356 * `local_map = aligned_offset` assignments above), and the
2357 * result the cache delivers must be the same as the driver
2358 * would deliver.
2359 */
2360 assert(local_file == bs);
2361 assert(local_map == aligned_offset);
2362 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2363 }
2364 }
2365 } else {
2366 /* Default code for filters */
2367
2368 local_file = bdrv_filter_bs(bs);
2369 assert(local_file);
2370
2371 *pnum = aligned_bytes;
2372 local_map = aligned_offset;
2373 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2374 }
2375 if (ret < 0) {
2376 *pnum = 0;
2377 goto out;
2378 }
2379
2380 /*
2381 * The driver's result must be a non-zero multiple of request_alignment.
2382 * Clamp pnum and adjust map to original request.
2383 */
2384 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2385 align > offset - aligned_offset);
2386 if (ret & BDRV_BLOCK_RECURSE) {
2387 assert(ret & BDRV_BLOCK_DATA);
2388 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2389 assert(!(ret & BDRV_BLOCK_ZERO));
2390 }
2391
2392 *pnum -= offset - aligned_offset;
2393 if (*pnum > bytes) {
2394 *pnum = bytes;
2395 }
2396 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2397 local_map += offset - aligned_offset;
2398 }
2399
2400 if (ret & BDRV_BLOCK_RAW) {
2401 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2402 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2403 *pnum, pnum, &local_map, &local_file);
2404 goto out;
2405 }
2406
2407 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2408 ret |= BDRV_BLOCK_ALLOCATED;
2409 } else if (bs->drv->supports_backing) {
2410 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2411
2412 if (!cow_bs) {
2413 ret |= BDRV_BLOCK_ZERO;
2414 } else if (want_zero) {
2415 int64_t size2 = bdrv_getlength(cow_bs);
2416
2417 if (size2 >= 0 && offset >= size2) {
2418 ret |= BDRV_BLOCK_ZERO;
2419 }
2420 }
2421 }
2422
2423 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2424 local_file && local_file != bs &&
2425 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2426 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2427 int64_t file_pnum;
2428 int ret2;
2429
2430 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2431 *pnum, &file_pnum, NULL, NULL);
2432 if (ret2 >= 0) {
2433 /* Ignore errors. This is just providing extra information, it
2434 * is useful but not necessary.
2435 */
2436 if (ret2 & BDRV_BLOCK_EOF &&
2437 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2438 /*
2439 * It is valid for the format block driver to read
2440 * beyond the end of the underlying file's current
2441 * size; such areas read as zero.
2442 */
2443 ret |= BDRV_BLOCK_ZERO;
2444 } else {
2445 /* Limit request to the range reported by the protocol driver */
2446 *pnum = file_pnum;
2447 ret |= (ret2 & BDRV_BLOCK_ZERO);
2448 }
2449 }
2450 }
2451
2452 out:
2453 bdrv_dec_in_flight(bs);
2454 if (ret >= 0 && offset + *pnum == total_size) {
2455 ret |= BDRV_BLOCK_EOF;
2456 }
2457 early_out:
2458 if (file) {
2459 *file = local_file;
2460 }
2461 if (map) {
2462 *map = local_map;
2463 }
2464 return ret;
2465 }
2466
2467 int coroutine_fn
2468 bdrv_co_common_block_status_above(BlockDriverState *bs,
2469 BlockDriverState *base,
2470 bool include_base,
2471 bool want_zero,
2472 int64_t offset,
2473 int64_t bytes,
2474 int64_t *pnum,
2475 int64_t *map,
2476 BlockDriverState **file,
2477 int *depth)
2478 {
2479 int ret;
2480 BlockDriverState *p;
2481 int64_t eof = 0;
2482 int dummy;
2483 IO_CODE();
2484
2485 assert(!include_base || base); /* Can't include NULL base */
2486 assert_bdrv_graph_readable();
2487
2488 if (!depth) {
2489 depth = &dummy;
2490 }
2491 *depth = 0;
2492
2493 if (!include_base && bs == base) {
2494 *pnum = bytes;
2495 return 0;
2496 }
2497
2498 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2499 ++*depth;
2500 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2501 return ret;
2502 }
2503
2504 if (ret & BDRV_BLOCK_EOF) {
2505 eof = offset + *pnum;
2506 }
2507
2508 assert(*pnum <= bytes);
2509 bytes = *pnum;
2510
2511 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2512 p = bdrv_filter_or_cow_bs(p))
2513 {
2514 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2515 file);
2516 ++*depth;
2517 if (ret < 0) {
2518 return ret;
2519 }
2520 if (*pnum == 0) {
2521 /*
2522 * The top layer deferred to this layer, and because this layer is
2523 * short, any zeroes that we synthesize beyond EOF behave as if they
2524 * were allocated at this layer.
2525 *
2526 * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may be
2527 * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
2528 * function; see below.
2529 */
2530 assert(ret & BDRV_BLOCK_EOF);
2531 *pnum = bytes;
2532 if (file) {
2533 *file = p;
2534 }
2535 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2536 break;
2537 }
2538 if (ret & BDRV_BLOCK_ALLOCATED) {
2539 /*
2540 * We've found the node and the status; stop searching.
2541 *
2542 * Drop BDRV_BLOCK_EOF, as it does not apply to the upper layer,
2543 * which may be larger. We'll add BDRV_BLOCK_EOF if needed at the
2544 * end of the function; see below.
2545 */
2546 ret &= ~BDRV_BLOCK_EOF;
2547 break;
2548 }
2549
2550 if (p == base) {
2551 assert(include_base);
2552 break;
2553 }
2554
2555 /*
2556 * OK, the [offset, offset + *pnum) region is unallocated on this layer;
2557 * let's continue diving down the chain.
2558 */
2559 assert(*pnum <= bytes);
2560 bytes = *pnum;
2561 }
2562
2563 if (offset + *pnum == eof) {
2564 ret |= BDRV_BLOCK_EOF;
2565 }
2566
2567 return ret;
2568 }
2569
2570 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2571 BlockDriverState *base,
2572 int64_t offset, int64_t bytes,
2573 int64_t *pnum, int64_t *map,
2574 BlockDriverState **file)
2575 {
2576 IO_CODE();
2577 return bdrv_co_common_block_status_above(bs, base, false, true, offset,
2578 bytes, pnum, map, file, NULL);
2579 }
2580
2581 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2582 int64_t offset, int64_t bytes, int64_t *pnum,
2583 int64_t *map, BlockDriverState **file)
2584 {
2585 IO_CODE();
2586 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2587 pnum, map, file, NULL);
2588 }
2589
2590 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2591 int64_t *pnum, int64_t *map, BlockDriverState **file)
2592 {
2593 IO_CODE();
2594 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2595 offset, bytes, pnum, map, file);
2596 }
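
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): classifying an image extent by extent with bdrv_block_status().
 * Except at end-of-file, 'pnum' is non-zero on success, so the loop
 * always makes progress.
 */
static void example_block_status_walk(BlockDriverState *bs, int64_t size)
{
    int64_t offset = 0;

    while (offset < size) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, size - offset,
                                    &pnum, &map, &file);

        if (ret < 0 || pnum == 0) {
            break; /* error, or BDRV_BLOCK_EOF with pnum == 0 */
        }
        if (ret & BDRV_BLOCK_ZERO) {
            /* [offset, offset + pnum) reads as zeroes */
        }
        offset += pnum;
    }
}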
2597
2598 /*
2599 * Check @bs (and its backing chain) to see if the range defined
2600 * by @offset and @bytes is known to read as zeroes.
2601 * Return 1 if that is the case, 0 otherwise, and -errno on error.
2602 * This test is meant to be fast rather than accurate, so returning 0
2603 * does not guarantee non-zero data.
2604 */
2605 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2606 int64_t bytes)
2607 {
2608 int ret;
2609 int64_t pnum = bytes;
2610 IO_CODE();
2611
2612 if (!bytes) {
2613 return 1;
2614 }
2615
2616 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2617 bytes, &pnum, NULL, NULL, NULL);
2618
2619 if (ret < 0) {
2620 return ret;
2621 }
2622
2623 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2624 }
2625
2626 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
2627 int64_t bytes, int64_t *pnum)
2628 {
2629 int ret;
2630 int64_t dummy;
2631 IO_CODE();
2632
2633 ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
2634 bytes, pnum ? pnum : &dummy, NULL,
2635 NULL, NULL);
2636 if (ret < 0) {
2637 return ret;
2638 }
2639 return !!(ret & BDRV_BLOCK_ALLOCATED);
2640 }
2641
2642 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
2643 int64_t *pnum)
2644 {
2645 int ret;
2646 int64_t dummy;
2647 IO_CODE();
2648
2649 ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
2650 bytes, pnum ? pnum : &dummy, NULL,
2651 NULL, NULL);
2652 if (ret < 0) {
2653 return ret;
2654 }
2655 return !!(ret & BDRV_BLOCK_ALLOCATED);
2656 }
2657
2658 /* See bdrv_is_allocated_above for documentation */
2659 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
2660 BlockDriverState *base,
2661 bool include_base, int64_t offset,
2662 int64_t bytes, int64_t *pnum)
2663 {
2664 int depth;
2665 int ret;
2666 IO_CODE();
2667
2668 ret = bdrv_co_common_block_status_above(top, base, include_base, false,
2669 offset, bytes, pnum, NULL, NULL,
2670 &depth);
2671 if (ret < 0) {
2672 return ret;
2673 }
2674
2675 if (ret & BDRV_BLOCK_ALLOCATED) {
2676 return depth;
2677 }
2678 return 0;
2679 }
2680
2681 /*
2682 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2683 *
2684 * Return a positive depth if (a prefix of) the given range is allocated
2685 * in any image between BASE and TOP (BASE is only included if include_base
2686 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
2687 * BASE can be NULL to check if the given offset is allocated in any
2688 * image of the chain. Return 0 otherwise, or negative errno on
2689 * failure.
2690 *
2691 * 'pnum' is set to the number of bytes (including and immediately
2692 * following the specified offset) that are known to be in the same
2693 * allocated/unallocated state. Note that a subsequent call starting
2694 * at 'offset + *pnum' may return the same allocation status (in other
2695 * words, the result is not necessarily the maximum possible range);
2696 * but 'pnum' will only be 0 when end of file is reached.
2697 */
2698 int bdrv_is_allocated_above(BlockDriverState *top,
2699 BlockDriverState *base,
2700 bool include_base, int64_t offset,
2701 int64_t bytes, int64_t *pnum)
2702 {
2703 int depth;
2704 int ret;
2705 IO_CODE();
2706
2707 ret = bdrv_common_block_status_above(top, base, include_base, false,
2708 offset, bytes, pnum, NULL, NULL,
2709 &depth);
2710 if (ret < 0) {
2711 return ret;
2712 }
2713
2714 if (ret & BDRV_BLOCK_ALLOCATED) {
2715 return depth;
2716 }
2717 return 0;
2718 }
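
/*
 * Illustrative sketch (hypothetical helper): interpreting the depth
 * returned by bdrv_is_allocated_above() for a chain base <- mid <- top
 * queried as bdrv_is_allocated_above(top, base, false, ...): 1 means the
 * prefix is allocated in top, 2 in mid, 0 nowhere above base.
 */
static void example_find_allocating_layer(BlockDriverState *top,
                                          BlockDriverState *base,
                                          int64_t offset, int64_t bytes)
{
    int64_t pnum;
    int depth = bdrv_is_allocated_above(top, base, false, offset, bytes,
                                        &pnum);

    if (depth > 0) {
        /* the first 'pnum' bytes come from layer 'depth' (1 == top) */
    } else if (depth == 0) {
        /* unallocated above base: reads come from base or as zeroes */
    } /* depth < 0 is -errno */
}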
2719
2720 int coroutine_fn
2721 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2722 {
2723 BlockDriver *drv = bs->drv;
2724 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2725 int ret;
2726 IO_CODE();
2727 assert_bdrv_graph_readable();
2728
2729 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2730 if (ret < 0) {
2731 return ret;
2732 }
2733
2734 if (!drv) {
2735 return -ENOMEDIUM;
2736 }
2737
2738 bdrv_inc_in_flight(bs);
2739
2740 if (drv->bdrv_co_load_vmstate) {
2741 ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2742 } else if (child_bs) {
2743 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2744 } else {
2745 ret = -ENOTSUP;
2746 }
2747
2748 bdrv_dec_in_flight(bs);
2749
2750 return ret;
2751 }
2752
2753 int coroutine_fn
2754 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2755 {
2756 BlockDriver *drv = bs->drv;
2757 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2758 int ret;
2759 IO_CODE();
2760 assert_bdrv_graph_readable();
2761
2762 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2763 if (ret < 0) {
2764 return ret;
2765 }
2766
2767 if (!drv) {
2768 return -ENOMEDIUM;
2769 }
2770
2771 bdrv_inc_in_flight(bs);
2772
2773 if (drv->bdrv_co_save_vmstate) {
2774 ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2775 } else if (child_bs) {
2776 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2777 } else {
2778 ret = -ENOTSUP;
2779 }
2780
2781 bdrv_dec_in_flight(bs);
2782
2783 return ret;
2784 }
2785
2786 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2787 int64_t pos, int size)
2788 {
2789 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2790 int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2791 IO_CODE();
2792
2793 return ret < 0 ? ret : size;
2794 }
2795
2796 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2797 int64_t pos, int size)
2798 {
2799 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2800 int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2801 IO_CODE();
2802
2803 return ret < 0 ? ret : size;
2804 }
2805
2806 /**************************************************************/
2807 /* async I/Os */
2808
2809 void bdrv_aio_cancel(BlockAIOCB *acb)
2810 {
2811 IO_CODE();
2812 qemu_aio_ref(acb);
2813 bdrv_aio_cancel_async(acb);
2814 while (acb->refcnt > 1) {
2815 if (acb->aiocb_info->get_aio_context) {
2816 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2817 } else if (acb->bs) {
2818 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2819 * assert that we're not using an I/O thread. Thread-safe
2820 * code should use bdrv_aio_cancel_async exclusively.
2821 */
2822 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2823 aio_poll(bdrv_get_aio_context(acb->bs), true);
2824 } else {
2825 abort();
2826 }
2827 }
2828 qemu_aio_unref(acb);
2829 }
2830
2831 /* Async version of aio cancel. The caller is not blocked if the acb implements
2832 * cancel_async; otherwise we do nothing and let the request complete normally.
2833 * In either case the completion callback must be called. */
2834 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2835 {
2836 IO_CODE();
2837 if (acb->aiocb_info->cancel_async) {
2838 acb->aiocb_info->cancel_async(acb);
2839 }
2840 }
2841
2842 /**************************************************************/
2843 /* Coroutine block device emulation */
2844
2845 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2846 {
2847 BdrvChild *primary_child = bdrv_primary_child(bs);
2848 BdrvChild *child;
2849 int current_gen;
2850 int ret = 0;
2851 IO_CODE();
2852
2853 assert_bdrv_graph_readable();
2854 bdrv_inc_in_flight(bs);
2855
2856 if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
2857 bdrv_is_sg(bs)) {
2858 goto early_exit;
2859 }
2860
2861 qemu_co_mutex_lock(&bs->reqs_lock);
2862 current_gen = qatomic_read(&bs->write_gen);
2863
2864 /* Wait until any previous flushes are completed */
2865 while (bs->active_flush_req) {
2866 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2867 }
2868
2869 /* Flushes reach this point in nondecreasing current_gen order. */
2870 bs->active_flush_req = true;
2871 qemu_co_mutex_unlock(&bs->reqs_lock);
2872
2873 /* Write back all layers by calling one driver function */
2874 if (bs->drv->bdrv_co_flush) {
2875 ret = bs->drv->bdrv_co_flush(bs);
2876 goto out;
2877 }
2878
2879 /* Write back cached data to the OS even with cache=unsafe */
2880 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2881 if (bs->drv->bdrv_co_flush_to_os) {
2882 ret = bs->drv->bdrv_co_flush_to_os(bs);
2883 if (ret < 0) {
2884 goto out;
2885 }
2886 }
2887
2888 /* But don't actually force it to the disk with cache=unsafe */
2889 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2890 goto flush_children;
2891 }
2892
2893 /* Check if we really need to flush anything */
2894 if (bs->flushed_gen == current_gen) {
2895 goto flush_children;
2896 }
2897
2898 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2899 if (!bs->drv) {
2900 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2901 * (even in case of apparent success) */
2902 ret = -ENOMEDIUM;
2903 goto out;
2904 }
2905 if (bs->drv->bdrv_co_flush_to_disk) {
2906 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2907 } else if (bs->drv->bdrv_aio_flush) {
2908 BlockAIOCB *acb;
2909 CoroutineIOCompletion co = {
2910 .coroutine = qemu_coroutine_self(),
2911 };
2912
2913 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2914 if (acb == NULL) {
2915 ret = -EIO;
2916 } else {
2917 qemu_coroutine_yield();
2918 ret = co.ret;
2919 }
2920 } else {
2921 /*
2922 * Some block drivers always operate in either writethrough or unsafe
2923 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2924 * know how the server works (because the behaviour is hardcoded or
2925 * depends on server-side configuration), so we can't ensure that
2926 * everything is safe on disk. Returning an error doesn't work because
2927 * that would break guests even if the server operates in writethrough
2928 * mode.
2929 *
2930 * Let's hope the user knows what they're doing.
2931 */
2932 ret = 0;
2933 }
2934
2935 if (ret < 0) {
2936 goto out;
2937 }
2938
2939 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2940 * set in the case of cache=unsafe, so there are no useless flushes.
2941 */
2942 flush_children:
2943 ret = 0;
2944 QLIST_FOREACH(child, &bs->children, next) {
2945 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2946 int this_child_ret = bdrv_co_flush(child->bs);
2947 if (!ret) {
2948 ret = this_child_ret;
2949 }
2950 }
2951 }
2952
2953 out:
2954 /* Notify any pending flushes that we have completed */
2955 if (ret == 0) {
2956 bs->flushed_gen = current_gen;
2957 }
2958
2959 qemu_co_mutex_lock(&bs->reqs_lock);
2960 bs->active_flush_req = false;
2961 /* Return value is ignored - it's ok if wait queue is empty */
2962 qemu_co_queue_next(&bs->flush_queue);
2963 qemu_co_mutex_unlock(&bs->reqs_lock);
2964
2965 early_exit:
2966 bdrv_dec_in_flight(bs);
2967 return ret;
2968 }
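
/*
 * Illustrative sketch (hypothetical helper): the write_gen/flushed_gen
 * counters above make redundant flushes cheap. With no writes between
 * the two calls below, the second flush sees flushed_gen == current_gen
 * and skips flushing to disk.
 */
static int coroutine_fn example_double_flush(BlockDriverState *bs)
{
    int ret = bdrv_co_flush(bs);

    if (ret < 0) {
        return ret;
    }
    return bdrv_co_flush(bs); /* cheap: this generation is already flushed */
}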
2969
2970 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2971 int64_t bytes)
2972 {
2973 BdrvTrackedRequest req;
2974 int ret;
2975 int64_t max_pdiscard;
2976 int head, tail, align;
2977 BlockDriverState *bs = child->bs;
2978 IO_CODE();
2979 assert_bdrv_graph_readable();
2980
2981 if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
2982 return -ENOMEDIUM;
2983 }
2984
2985 if (bdrv_has_readonly_bitmaps(bs)) {
2986 return -EPERM;
2987 }
2988
2989 ret = bdrv_check_request(offset, bytes, NULL);
2990 if (ret < 0) {
2991 return ret;
2992 }
2993
2994 /* Do nothing if disabled. */
2995 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2996 return 0;
2997 }
2998
2999 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
3000 return 0;
3001 }
3002
3003 /* Invalidate the cached block-status data range if this discard overlaps */
3004 bdrv_bsc_invalidate_range(bs, offset, bytes);
3005
3006 /* Discard is advisory, but some devices track and coalesce
3007 * unaligned requests, so we must pass everything down rather than
3008 * rounding here. Still, most devices will just silently ignore
3009 * unaligned requests (by returning -ENOTSUP), so we must fragment
3010 * the request accordingly. */
3011 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3012 assert(align % bs->bl.request_alignment == 0);
3013 head = offset % align;
3014 tail = (offset + bytes) % align;
3015
3016 bdrv_inc_in_flight(bs);
3017 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3018
3019 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3020 if (ret < 0) {
3021 goto out;
3022 }
3023
3024 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
3025 align);
3026 assert(max_pdiscard >= bs->bl.request_alignment);
3027
3028 while (bytes > 0) {
3029 int64_t num = bytes;
3030
3031 if (head) {
3032 /* Make small requests to get to alignment boundaries. */
3033 num = MIN(bytes, align - head);
3034 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3035 num %= bs->bl.request_alignment;
3036 }
3037 head = (head + num) % align;
3038 assert(num < max_pdiscard);
3039 } else if (tail) {
3040 if (num > align) {
3041 /* Shorten the request to the last aligned cluster. */
3042 num -= tail;
3043 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3044 tail > bs->bl.request_alignment) {
3045 tail %= bs->bl.request_alignment;
3046 num -= tail;
3047 }
3048 }
3049 /* limit request size */
3050 if (num > max_pdiscard) {
3051 num = max_pdiscard;
3052 }
3053
3054 if (!bs->drv) {
3055 ret = -ENOMEDIUM;
3056 goto out;
3057 }
3058 if (bs->drv->bdrv_co_pdiscard) {
3059 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3060 } else {
3061 BlockAIOCB *acb;
3062 CoroutineIOCompletion co = {
3063 .coroutine = qemu_coroutine_self(),
3064 };
3065
3066 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3067 bdrv_co_io_em_complete, &co);
3068 if (acb == NULL) {
3069 ret = -EIO;
3070 goto out;
3071 } else {
3072 qemu_coroutine_yield();
3073 ret = co.ret;
3074 }
3075 }
3076 if (ret && ret != -ENOTSUP) {
3077 goto out;
3078 }
3079
3080 offset += num;
3081 bytes -= num;
3082 }
3083 ret = 0;
3084 out:
3085 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3086 tracked_request_end(&req);
3087 bdrv_dec_in_flight(bs);
3088 return ret;
3089 }
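
/*
 * Editor's worked example (illustrative numbers, not from the original
 * source): with pdiscard_alignment = 64 KiB and request_alignment = 512,
 * a discard at offset = 65024 for bytes = 132096 yields align = 65536,
 * head = 65024 and tail = 512. The loop above then issues 512 bytes up
 * to the 64 KiB boundary, one aligned 128 KiB span, and finally the
 * trailing 512 bytes.
 */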
3090
3091 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3092 {
3093 BlockDriver *drv = bs->drv;
3094 CoroutineIOCompletion co = {
3095 .coroutine = qemu_coroutine_self(),
3096 };
3097 BlockAIOCB *acb;
3098 IO_CODE();
3099 assert_bdrv_graph_readable();
3100
3101 bdrv_inc_in_flight(bs);
3102 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3103 co.ret = -ENOTSUP;
3104 goto out;
3105 }
3106
3107 if (drv->bdrv_co_ioctl) {
3108 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3109 } else {
3110 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3111 if (!acb) {
3112 co.ret = -ENOTSUP;
3113 goto out;
3114 }
3115 qemu_coroutine_yield();
3116 }
3117 out:
3118 bdrv_dec_in_flight(bs);
3119 return co.ret;
3120 }
3121
3122 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
3123 unsigned int *nr_zones,
3124 BlockZoneDescriptor *zones)
3125 {
3126 BlockDriver *drv = bs->drv;
3127 CoroutineIOCompletion co = {
3128 .coroutine = qemu_coroutine_self(),
3129 };
3130 IO_CODE();
3131
3132 bdrv_inc_in_flight(bs);
3133 if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
3134 co.ret = -ENOTSUP;
3135 goto out;
3136 }
3137 co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
3138 out:
3139 bdrv_dec_in_flight(bs);
3140 return co.ret;
3141 }
3142
3143 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
3144 int64_t offset, int64_t len)
3145 {
3146 BlockDriver *drv = bs->drv;
3147 CoroutineIOCompletion co = {
3148 .coroutine = qemu_coroutine_self(),
3149 };
3150 IO_CODE();
3151
3152 bdrv_inc_in_flight(bs);
3153 if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
3154 co.ret = -ENOTSUP;
3155 goto out;
3156 }
3157 co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
3158 out:
3159 bdrv_dec_in_flight(bs);
3160 return co.ret;
3161 }
3162
3163 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
3164 QEMUIOVector *qiov,
3165 BdrvRequestFlags flags)
3166 {
3167 int ret;
3168 BlockDriver *drv = bs->drv;
3169 CoroutineIOCompletion co = {
3170 .coroutine = qemu_coroutine_self(),
3171 };
3172 IO_CODE();
3173
3174 ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
3175 if (ret < 0) {
3176 return ret;
3177 }
3178
3179 bdrv_inc_in_flight(bs);
3180 if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
3181 co.ret = -ENOTSUP;
3182 goto out;
3183 }
3184 co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
3185 out:
3186 bdrv_dec_in_flight(bs);
3187 return co.ret;
3188 }
3189
3190 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3191 {
3192 IO_CODE();
3193 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3194 }
3195
3196 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3197 {
3198 IO_CODE();
3199 return memset(qemu_blockalign(bs, size), 0, size);
3200 }
3201
3202 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3203 {
3204 size_t align = bdrv_opt_mem_align(bs);
3205 IO_CODE();
3206
3207 /* Ensure that NULL is never returned on success */
3208 assert(align > 0);
3209 if (size == 0) {
3210 size = align;
3211 }
3212
3213 return qemu_try_memalign(align, size);
3214 }
3215
3216 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3217 {
3218 void *mem = qemu_try_blockalign(bs, size);
3219 IO_CODE();
3220
3221 if (mem) {
3222 memset(mem, 0, size);
3223 }
3224
3225 return mem;
3226 }
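
/*
 * Illustrative sketch (hypothetical helper): the typical bounce-buffer
 * pattern for the allocators above. qemu_try_blockalign() may return
 * NULL, while qemu_blockalign() aborts on failure; qemu_vfree() releases
 * either.
 */
static int example_with_bounce_buffer(BlockDriverState *bs, size_t size)
{
    uint8_t *bounce = qemu_try_blockalign(bs, size);

    if (bounce == NULL) {
        return -ENOMEM;
    }
    /* ... perform properly aligned I/O through 'bounce' here ... */
    qemu_vfree(bounce);
    return 0;
}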
3227
3228 void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs)
3229 {
3230 BdrvChild *child;
3231 IO_CODE();
3232 assert_bdrv_graph_readable();
3233
3234 QLIST_FOREACH(child, &bs->children, next) {
3235 bdrv_co_io_plug(child->bs);
3236 }
3237
3238 if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
3239 BlockDriver *drv = bs->drv;
3240 if (drv && drv->bdrv_co_io_plug) {
3241 drv->bdrv_co_io_plug(bs);
3242 }
3243 }
3244 }
3245
3246 void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs)
3247 {
3248 BdrvChild *child;
3249 IO_CODE();
3250 assert_bdrv_graph_readable();
3251
3252 assert(bs->io_plugged);
3253 if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
3254 BlockDriver *drv = bs->drv;
3255 if (drv && drv->bdrv_co_io_unplug) {
3256 drv->bdrv_co_io_unplug(bs);
3257 }
3258 }
3259
3260 QLIST_FOREACH(child, &bs->children, next) {
3261 bdrv_co_io_unplug(child->bs);
3262 }
3263 }
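
/*
 * Illustrative sketch (hypothetical helper): bracketing a batch of
 * requests with plug/unplug so drivers can submit them in one go. Calls
 * nest by way of the io_plugged counter above; the driver's unplug hook
 * only runs when the count drops back to zero.
 */
static void coroutine_fn example_plugged_batch(BlockDriverState *bs)
{
    bdrv_co_io_plug(bs);
    /* ... issue several bdrv_co_pwritev()/bdrv_co_preadv() calls ... */
    bdrv_co_io_unplug(bs);
}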
3264
3265 /* Helper that undoes bdrv_register_buf() when it fails partway through */
3266 static void GRAPH_RDLOCK
3267 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3268 BdrvChild *final_child)
3269 {
3270 BdrvChild *child;
3271
3272 GLOBAL_STATE_CODE();
3273 assert_bdrv_graph_readable();
3274
3275 QLIST_FOREACH(child, &bs->children, next) {
3276 if (child == final_child) {
3277 break;
3278 }
3279
3280 bdrv_unregister_buf(child->bs, host, size);
3281 }
3282
3283 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3284 bs->drv->bdrv_unregister_buf(bs, host, size);
3285 }
3286 }
3287
3288 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3289 Error **errp)
3290 {
3291 BdrvChild *child;
3292
3293 GLOBAL_STATE_CODE();
3294 GRAPH_RDLOCK_GUARD_MAINLOOP();
3295
3296 if (bs->drv && bs->drv->bdrv_register_buf) {
3297 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3298 return false;
3299 }
3300 }
3301 QLIST_FOREACH(child, &bs->children, next) {
3302 if (!bdrv_register_buf(child->bs, host, size, errp)) {
3303 bdrv_register_buf_rollback(bs, host, size, child);
3304 return false;
3305 }
3306 }
3307 return true;
3308 }
3309
3310 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
3311 {
3312 BdrvChild *child;
3313
3314 GLOBAL_STATE_CODE();
3315 GRAPH_RDLOCK_GUARD_MAINLOOP();
3316
3317 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3318 bs->drv->bdrv_unregister_buf(bs, host, size);
3319 }
3320 QLIST_FOREACH(child, &bs->children, next) {
3321 bdrv_unregister_buf(child->bs, host, size);
3322 }
3323 }
3324
3325 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3326 BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3327 int64_t dst_offset, int64_t bytes,
3328 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3329 bool recurse_src)
3330 {
3331 BdrvTrackedRequest req;
3332 int ret;
3333 assert_bdrv_graph_readable();
3334
3335 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3336 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3337 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3338 assert(!(read_flags & BDRV_REQ_NO_WAIT));
3339 assert(!(write_flags & BDRV_REQ_NO_WAIT));
3340
3341 if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3342 return -ENOMEDIUM;
3343 }
3344 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3345 if (ret) {
3346 return ret;
3347 }
3348 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3349 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3350 }
3351
3352 if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3353 return -ENOMEDIUM;
3354 }
3355 ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3356 if (ret) {
3357 return ret;
3358 }
3359
3360 if (!src->bs->drv->bdrv_co_copy_range_from
3361 || !dst->bs->drv->bdrv_co_copy_range_to
3362 || src->bs->encrypted || dst->bs->encrypted) {
3363 return -ENOTSUP;
3364 }
3365
3366 if (recurse_src) {
3367 bdrv_inc_in_flight(src->bs);
3368 tracked_request_begin(&req, src->bs, src_offset, bytes,
3369 BDRV_TRACKED_READ);
3370
3371 /* BDRV_REQ_SERIALISING is only for write operations */
3372 assert(!(read_flags & BDRV_REQ_SERIALISING));
3373 bdrv_wait_serialising_requests(&req);
3374
3375 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3376 src, src_offset,
3377 dst, dst_offset,
3378 bytes,
3379 read_flags, write_flags);
3380
3381 tracked_request_end(&req);
3382 bdrv_dec_in_flight(src->bs);
3383 } else {
3384 bdrv_inc_in_flight(dst->bs);
3385 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3386 BDRV_TRACKED_WRITE);
3387 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3388 write_flags);
3389 if (!ret) {
3390 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3391 src, src_offset,
3392 dst, dst_offset,
3393 bytes,
3394 read_flags, write_flags);
3395 }
3396 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3397 tracked_request_end(&req);
3398 bdrv_dec_in_flight(dst->bs);
3399 }
3400
3401 return ret;
3402 }
3403
3404 /* Copy range from @src to @dst.
3405 *
3406 * See the comment on bdrv_co_copy_range for the parameter and return value
3407 * semantics. */
3408 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3409 BdrvChild *dst, int64_t dst_offset,
3410 int64_t bytes,
3411 BdrvRequestFlags read_flags,
3412 BdrvRequestFlags write_flags)
3413 {
3414 IO_CODE();
3415 assert_bdrv_graph_readable();
3416 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3417 read_flags, write_flags);
3418 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3419 bytes, read_flags, write_flags, true);
3420 }
3421
3422 /* Copy range from @src to @dst.
3423 *
3424 * See the comment on bdrv_co_copy_range for the parameter and return value
3425 * semantics. */
3426 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3427 BdrvChild *dst, int64_t dst_offset,
3428 int64_t bytes,
3429 BdrvRequestFlags read_flags,
3430 BdrvRequestFlags write_flags)
3431 {
3432 IO_CODE();
3433 assert_bdrv_graph_readable();
3434 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3435 read_flags, write_flags);
3436 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3437 bytes, read_flags, write_flags, false);
3438 }
3439
3440 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3441 BdrvChild *dst, int64_t dst_offset,
3442 int64_t bytes, BdrvRequestFlags read_flags,
3443 BdrvRequestFlags write_flags)
3444 {
3445 IO_CODE();
3446 assert_bdrv_graph_readable();
3447
3448 return bdrv_co_copy_range_from(src, src_offset,
3449 dst, dst_offset,
3450 bytes, read_flags, write_flags);
3451 }
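
/*
 * Illustrative sketch (hypothetical caller): offloaded copy with a
 * fallback. As the checks in bdrv_co_copy_range_internal() show,
 * -ENOTSUP is returned when either driver lacks copy-range support or a
 * node is encrypted.
 */
static int coroutine_fn example_copy_with_fallback(BdrvChild *src,
                                                   BdrvChild *dst,
                                                   int64_t offset,
                                                   int64_t bytes)
{
    int ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);

    if (ret == -ENOTSUP) {
        /* fall back to an ordinary read/write pair here */
    }
    return ret;
}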
3452
3453 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3454 {
3455 BdrvChild *c;
3456 QLIST_FOREACH(c, &bs->parents, next_parent) {
3457 if (c->klass->resize) {
3458 c->klass->resize(c);
3459 }
3460 }
3461 }
3462
3463 /**
3464 * Truncate file to 'offset' bytes (needed only for file protocols)
3465 *
3466 * If 'exact' is true, the file must be resized to exactly the given
3467 * 'offset'. Otherwise, it is sufficient for the node to be at least
3468 * 'offset' bytes in length.
3469 */
3470 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3471 PreallocMode prealloc, BdrvRequestFlags flags,
3472 Error **errp)
3473 {
3474 BlockDriverState *bs = child->bs;
3475 BdrvChild *filtered, *backing;
3476 BlockDriver *drv = bs->drv;
3477 BdrvTrackedRequest req;
3478 int64_t old_size, new_bytes;
3479 int ret;
3480 IO_CODE();
3481 assert_bdrv_graph_readable();
3482
3483 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3484 if (!drv) {
3485 error_setg(errp, "No medium inserted");
3486 return -ENOMEDIUM;
3487 }
3488 if (offset < 0) {
3489 error_setg(errp, "Image size cannot be negative");
3490 return -EINVAL;
3491 }
3492
3493 ret = bdrv_check_request(offset, 0, errp);
3494 if (ret < 0) {
3495 return ret;
3496 }
3497
3498 old_size = bdrv_getlength(bs);
3499 if (old_size < 0) {
3500 error_setg_errno(errp, -old_size, "Failed to get old image size");
3501 return old_size;
3502 }
3503
3504 if (bdrv_is_read_only(bs)) {
3505 error_setg(errp, "Image is read-only");
3506 return -EACCES;
3507 }
3508
3509 if (offset > old_size) {
3510 new_bytes = offset - old_size;
3511 } else {
3512 new_bytes = 0;
3513 }
3514
3515 bdrv_inc_in_flight(bs);
3516 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3517 BDRV_TRACKED_TRUNCATE);
3518
3519 /* If we are growing the image and potentially using preallocation for the
3520 * new area, we need to make sure that no write requests are made to it
3521 * concurrently or they might be overwritten by preallocation. */
3522 if (new_bytes) {
3523 bdrv_make_request_serialising(&req, 1);
3524 }
3525 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3526 0);
3527 if (ret < 0) {
3528 error_setg_errno(errp, -ret,
3529 "Failed to prepare request for truncation");
3530 goto out;
3531 }
3532
3533 filtered = bdrv_filter_child(bs);
3534 backing = bdrv_cow_child(bs);
3535
3536 /*
3537 * If the image has a backing file that is large enough that it would
3538 * provide data for the new area, we cannot leave it unallocated because
3539 * then the backing file content would become visible. Instead, zero-fill
3540 * the new area.
3541 *
3542 * Note that if the image has a backing file but was opened without it,
3543 * keeping the content consistent with that backing file is the user's
3544 * responsibility.
3545 */
3546 if (new_bytes && backing) {
3547 int64_t backing_len;
3548
3549 backing_len = bdrv_co_getlength(backing->bs);
3550 if (backing_len < 0) {
3551 ret = backing_len;
3552 error_setg_errno(errp, -ret, "Could not get backing file size");
3553 goto out;
3554 }
3555
3556 if (backing_len > old_size) {
3557 flags |= BDRV_REQ_ZERO_WRITE;
3558 }
3559 }
3560
3561 if (drv->bdrv_co_truncate) {
3562 if (flags & ~bs->supported_truncate_flags) {
3563 error_setg(errp, "Block driver does not support requested flags");
3564 ret = -ENOTSUP;
3565 goto out;
3566 }
3567 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3568 } else if (filtered) {
3569 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3570 } else {
3571 error_setg(errp, "Image format driver does not support resize");
3572 ret = -ENOTSUP;
3573 goto out;
3574 }
3575 if (ret < 0) {
3576 goto out;
3577 }
3578
3579 ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3580 if (ret < 0) {
3581 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3582 } else {
3583 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3584 }
3585 /*
3586 * It's possible that truncation succeeded but bdrv_co_refresh_total_sectors
3587 * failed, but the latter doesn't affect how we should finish the request.
3588 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
3589 */
3590 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3591
3592 out:
3593 tracked_request_end(&req);
3594 bdrv_dec_in_flight(bs);
3595
3596 return ret;
3597 }
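
/*
 * Illustrative sketch (hypothetical helper): growing an image without
 * preallocation. With exact = false, the node may end up larger than
 * requested, as the comment above bdrv_co_truncate() explains.
 */
static int coroutine_fn example_grow_image(BdrvChild *child, int64_t new_size)
{
    Error *local_err = NULL;
    int ret = bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF,
                               0, &local_err);

    if (ret < 0) {
        error_report_err(local_err);
    }
    return ret;
}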
3598
3599 void bdrv_cancel_in_flight(BlockDriverState *bs)
3600 {
3601 GLOBAL_STATE_CODE();
3602 if (!bs || !bs->drv) {
3603 return;
3604 }
3605
3606 if (bs->drv->bdrv_cancel_in_flight) {
3607 bs->drv->bdrv_cancel_in_flight(bs);
3608 }
3609 }
3610
3611 int coroutine_fn
3612 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3613 QEMUIOVector *qiov, size_t qiov_offset)
3614 {
3615 BlockDriverState *bs = child->bs;
3616 BlockDriver *drv = bs->drv;
3617 int ret;
3618 IO_CODE();
3619 assert_bdrv_graph_readable();
3620
3621 if (!drv) {
3622 return -ENOMEDIUM;
3623 }
3624
3625 if (!drv->bdrv_co_preadv_snapshot) {
3626 return -ENOTSUP;
3627 }
3628
3629 bdrv_inc_in_flight(bs);
3630 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3631 bdrv_dec_in_flight(bs);
3632
3633 return ret;
3634 }
3635
3636 int coroutine_fn
3637 bdrv_co_snapshot_block_status(BlockDriverState *bs,
3638 bool want_zero, int64_t offset, int64_t bytes,
3639 int64_t *pnum, int64_t *map,
3640 BlockDriverState **file)
3641 {
3642 BlockDriver *drv = bs->drv;
3643 int ret;
3644 IO_CODE();
3645 assert_bdrv_graph_readable();
3646
3647 if (!drv) {
3648 return -ENOMEDIUM;
3649 }
3650
3651 if (!drv->bdrv_co_snapshot_block_status) {
3652 return -ENOTSUP;
3653 }
3654
3655 bdrv_inc_in_flight(bs);
3656 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
3657 pnum, map, file);
3658 bdrv_dec_in_flight(bs);
3659
3660 return ret;
3661 }
3662
3663 int coroutine_fn
3664 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3665 {
3666 BlockDriver *drv = bs->drv;
3667 int ret;
3668 IO_CODE();
3669 assert_bdrv_graph_readable();
3670
3671 if (!drv) {
3672 return -ENOMEDIUM;
3673 }
3674
3675 if (!drv->bdrv_co_pdiscard_snapshot) {
3676 return -ENOTSUP;
3677 }
3678
3679 bdrv_inc_in_flight(bs);
3680 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3681 bdrv_dec_in_flight(bs);
3682
3683 return ret;
3684 }