/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
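
/*
 * Editor's note (not in the original file): bdrv_merge_limits() above mixes
 * MAX() and MIN_NON_ZERO() deliberately. Alignments grow to the strictest
 * requirement of any child, while transfer/iov limits shrink to the most
 * restrictive child, with 0 meaning "no limit". For example, merging
 * dst->max_transfer == 0 (unlimited) with src->max_transfer == 65536 yields
 * 65536, whereas a plain MIN() would wrongly keep 0 and discard the limit.
 */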

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
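
/*
 * Editor's note (not in the original file): because the flag is a counter,
 * independent users may nest enable/disable pairs safely, e.g.:
 *
 *     bdrv_enable_copy_on_read(bs);   // user A
 *     bdrv_enable_copy_on_read(bs);   // user B
 *     bdrv_disable_copy_on_read(bs);  // user A done; CoR still active
 *     bdrv_disable_copy_on_read(bs);  // user B done; CoR now disabled
 */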

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);
    GLOBAL_STATE_CODE();

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
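
/*
 * Editor's note (not in the original file): bdrv_drain() above is just a
 * begin/end pair with nothing in between. Callers that need the node to stay
 * quiescent while they work on it use the section form instead:
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure the node or the graph around it ...
 *     bdrv_drained_end(bs);
 */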

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for the I/O requests to finish
     * may be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for the I/O requests to finish
     * may be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for the I/O requests to finish
     * may be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
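
/*
 * Editor's note (not in the original file): tracked_request_overlaps() treats
 * ranges as half-open intervals. If req covers [4096, 8192), then a query for
 * offset=8192, bytes=512 does not overlap (the ranges merely touch), while
 * offset=8191, bytes=1 does.
 */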

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}
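
/*
 * Editor's note, worked example (not in the original file): with
 * bdi.subcluster_size = 4096, offset = 5000 and bytes = 3000,
 * bdrv_round_to_subclusters() yields
 *
 *     *align_offset = QEMU_ALIGN_DOWN(5000, 4096)              = 4096
 *     *align_bytes  = QEMU_ALIGN_UP(5000 - 4096 + 3000, 4096)  = 4096
 *
 * i.e. the aligned region [4096, 8192) fully covers the original request
 * [5000, 8000).
 */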

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflows io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
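
/*
 * Editor's note (not in the original file): bdrv_check_request32() adds the
 * BDRV_REQUEST_MAX_BYTES cap on top of the generic checks, presumably so that
 * callers can go on to pass the byte count through interfaces limited to
 * values representable in a signed 32-bit int.
 */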

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
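
/*
 * Editor's note (not in the original file): bdrv_driver_preadv() above and
 * bdrv_driver_pwritev() below probe the driver callbacks in the same order of
 * preference: the byte-based _part variant (which takes qiov_offset directly),
 * then the plain byte-based coroutine callback, then the AIO callback bridged
 * back into the coroutine via bdrv_co_io_em_complete(), and finally the legacy
 * sector-based callback, which requires sector-aligned offset and bytes.
 */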

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t align_offset;
    int64_t align_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
    skip_bytes = offset - align_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   align_offset, align_bytes);

    while (align_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(align_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, align_offset,
                                    MIN(align_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(align_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, align_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        align_offset += pnum;
        align_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_co_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 *
 * @write is true for write requests, false for read requests.
 *
 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
 * merge existing vector elements into a single one. @collapse_bounce_buf acts
 * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
 * I/O vector elements so for read requests, the data can be copied back after
 * the read is done.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    bool write;
    QEMUIOVector local_qiov;

    uint8_t *collapse_bounce_buf;
    size_t collapse_len;
    QEMUIOVector pre_collapse_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    pad->write = write;

    return true;
}
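
/*
 * Editor's note, worked example (not in the original file): with
 * bs->bl.request_alignment = 512, offset = 700 and bytes = 1000:
 *
 *     pad->head   = 700 & 511          = 188
 *     pad->tail   = 512 - (1700 & 511) = 512 - 164 = 348
 *     sum         = 188 + 1000 + 348   = 1536
 *     buf_len     = 2 * 512            = 1024  (sum > align, head and tail)
 *     merge_reads = (1536 == 1024)     = false
 *
 * The padded request then covers [512, 2048): offset - head, with
 * head + bytes + tail = 1536 bytes, both ends 512-aligned.
 */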

static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

/**
 * Free *pad's associated buffers, and perform any necessary finalization steps.
 */
static void bdrv_padding_finalize(BdrvRequestPadding *pad)
{
    if (pad->collapse_bounce_buf) {
        if (!pad->write) {
            /*
             * If padding required elements in the vector to be collapsed into a
             * bounce buffer, copy the bounce buffer content back
             */
            qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
                                pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_vfree(pad->collapse_bounce_buf);
        qemu_iovec_destroy(&pad->pre_collapse_qiov);
    }
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}

/*
 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
 * ensuring that the resulting vector will not exceed IOV_MAX elements.
 *
 * To ensure this, when necessary, the first two or three elements of @iov are
 * merged into pad->collapse_bounce_buf and replaced by a reference to that
 * bounce buffer in pad->local_qiov.
 *
 * After performing a read request, the data from the bounce buffer must be
 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
 */
static int bdrv_create_padded_qiov(BlockDriverState *bs,
                                   BdrvRequestPadding *pad,
                                   struct iovec *iov, int niov,
                                   size_t iov_offset, size_t bytes)
{
    int padded_niov, surplus_count, collapse_count;

    /* Assert this invariant */
    assert(niov <= IOV_MAX);

    /*
     * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error
     * to the guest is not ideal, but there is little else we can do. At least
     * this will practically never happen on 64-bit systems.
     */
    if (SIZE_MAX - pad->head < bytes ||
        SIZE_MAX - pad->head - bytes < pad->tail)
    {
        return -EINVAL;
    }

    /* Length of the resulting IOV if we just concatenated everything */
    padded_niov = !!pad->head + niov + !!pad->tail;

    qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));

    if (pad->head) {
        qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
    }

    /*
     * If padded_niov > IOV_MAX, we cannot just concatenate everything.
     * Instead, merge the first two or three elements of @iov to reduce the
     * number of vector elements as necessary.
     */
    if (padded_niov > IOV_MAX) {
        /*
         * Only head and tail can have led to the number of entries exceeding
         * IOV_MAX, so we can exceed it by the head and tail at most. We need
         * to reduce the number of elements by `surplus_count`, so we merge that
         * many elements plus one into one element.
         */
        surplus_count = padded_niov - IOV_MAX;
        assert(surplus_count <= !!pad->head + !!pad->tail);
        collapse_count = surplus_count + 1;

        /*
         * Move the elements to collapse into `pad->pre_collapse_qiov`, then
         * advance `iov` (and associated variables) by those elements.
         */
        qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
        qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
                              collapse_count, iov_offset, SIZE_MAX);
        iov += collapse_count;
        iov_offset = 0;
        niov -= collapse_count;
        bytes -= pad->pre_collapse_qiov.size;

        /*
         * Construct the bounce buffer to match the length of the to-collapse
         * vector elements, and for write requests, initialize it with the data
         * from those elements. Then add it to `pad->local_qiov`.
         */
        pad->collapse_len = pad->pre_collapse_qiov.size;
        pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
        if (pad->write) {
            qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
                              pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_iovec_add(&pad->local_qiov,
                       pad->collapse_bounce_buf, pad->collapse_len);
    }

    qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);

    if (pad->tail) {
        qemu_iovec_add(&pad->local_qiov,
                       pad->buf + pad->buf_len - pad->tail, pad->tail);
    }

    assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
    return 0;
}
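
/*
 * Editor's note, worked example (not in the original file): suppose the guest
 * vector already has niov == IOV_MAX entries and the request needs both head
 * and tail padding. Then padded_niov == IOV_MAX + 2, surplus_count == 2, and
 * collapse_count == 3: the first three guest elements are merged into one
 * bounce-buffer element, bringing the final vector back down to IOV_MAX.
 */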
7a3f542f
VSO
1693/*
1694 * bdrv_pad_request
1695 *
1696 * Exchange request parameters with padded request if needed. Don't include RMW
1697 * read of padding, bdrv_padding_rmw_read() should be called separately if
1698 * needed.
1699 *
18743311
HC
1700 * @write is true for write requests, false for read requests.
1701 *
98ca4549
VSO
1702 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
1703 * - on function start they represent original request
1704 * - on failure or when padding is not needed they are unchanged
1705 * - on success when padding is needed they represent padded request
61007b31 1706 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            bool write,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;
    struct iovec *sliced_iov;
    int sliced_niov;
    size_t sliced_head, sliced_tail;

    /* Should have been checked by the caller already */
    ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
                                  &sliced_head, &sliced_tail,
                                  &sliced_niov);

    /* Guaranteed by bdrv_check_request32() */
    assert(*bytes <= SIZE_MAX);
    ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
                                  sliced_head, *bytes);
    if (ret < 0) {
        bdrv_padding_finalize(pad);
        return ret;
    }
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
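
/*
 * Worked example (illustrative values, assuming request_alignment == 512):
 * for a request with offset == 700 and bytes == 512, bdrv_init_padding()
 * yields pad.head == 188 and pad.tail == 324, so on success the in-out
 * parameters become offset == 512 and bytes == 1024, with *qiov pointing at
 * pad.local_qiov, which covers [512, 1536).
 */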

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero request is nonsense. Even if the driver has a
         * special meaning for zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass it to the driver
         * due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
                           &pad, NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_finalize(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
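
/*
 * Illustrative walk-through (hypothetical limits): with alignment == 512 and
 * a request covering [700, 2700), the loop above issues three fragments: a
 * 324-byte head up to the first aligned offset ([700, 1024)), the aligned
 * bulk [1024, 2560), and a 140-byte tail [2560, 2700). Only the bulk
 * fragment is guaranteed to reach the driver with aligned offset and length.
 */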

static inline int coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of the image file, so we cannot assert about
     * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically a
     * discard request beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
                     int64_t offset, int64_t bytes, int64_t align,
                     QEMUIOVector *qiov, size_t qiov_offset,
                     BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }

        /* Can't use optimization hint with bufferless zero write */
        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
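
/*
 * Fragmentation example (hypothetical limits): with max_transfer == 64 KiB,
 * a 200 KiB aligned write is split into chunks of 64, 64, 64 and 8 KiB.
 * If BDRV_REQ_FUA is requested but not natively supported, the flag is
 * stripped from every chunk except the last, so the emulated flush happens
 * only once.
 */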

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
                        BdrvRequestFlags flags, BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    /* This flag doesn't make sense for padding or zero writes */
    flags &= ~BDRV_REQ_REGISTERED_BUF;

    padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
    if (padding) {
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        int64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_finalize(&pad);

    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;
    IO_CODE();

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero request is nonsense. Even if the driver has a
         * special meaning for zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass it to the driver
         * due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length write occasionally.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad request for following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
         * alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
                               &pad, &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * Request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_finalize(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
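
/*
 * Note on the write path above (informational summary): zero writes are
 * dispatched to bdrv_co_do_zero_pwritev(), which handles alignment itself;
 * all other writes are padded here when misaligned, and a padded write is
 * serialised and preceded by an RMW read of the padding before the single
 * aligned write is issued.
 */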

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    IO_CODE();
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
    assert_bdrv_graph_readable();

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a BlkBackend
 * or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request for stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state. Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure. Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
                     int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    assert_bdrv_graph_readable();
    *pnum = 0;
    total_size = bdrv_co_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_co_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation. There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance. Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data. This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above. Technically, we do not need to check
             * it here; the worst that can happen is that we fill the cache for
             * non-protocol nodes, and then it is never used. However, filling
             * the cache requires an RCU update, so double check here to avoid
             * such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when we
             * have accurate information about what is zero and what is data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_bs must be the node
                 * itself.
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_co_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors. This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
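
/*
 * Clamping example (illustrative, align == 4096): a query for offset == 5000,
 * bytes == 1000 is widened to aligned_offset == 4096, aligned_bytes == 4096
 * before asking the driver. If the driver answers *pnum == 4096, the code
 * above shrinks it by offset - aligned_offset == 904 and then clamps it to
 * the requested 1000 bytes, adjusting local_map by the same 904 bytes.
 */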

int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;
    IO_CODE();

    assert(!include_base || base); /* Can't include NULL base */
    assert_bdrv_graph_readable();

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if they
             * were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
             * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
             * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
             * below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, [offset, offset + *pnum) region is unallocated on this layer,
         * let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}
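
/*
 * Chain-walk example (illustrative): with a chain base <- mid <- top, a query
 * on top for a range allocated only in mid returns at depth == 2 with
 * BDRV_BLOCK_ALLOCATED set. If nothing above base allocates the range and
 * include_base is false, the walk stops before querying base and the result
 * has BDRV_BLOCK_ALLOCATED unset.
 */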

int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    IO_CODE();
    return bdrv_co_common_block_status_above(bs, base, false, true, offset,
                                             bytes, pnum, map, file, NULL);
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}

/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;
    IO_CODE();

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);

    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}

int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain. Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state. Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
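
/*
 * Depth example (illustrative): for base <- inter <- top, a range written
 * only in inter makes bdrv_is_allocated_above(top, base, false, ...) return
 * 2, while a range present in no layer above base returns 0.
 */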

int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_load_vmstate) {
        ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_save_vmstate) {
        ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    IO_CODE();
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread. Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb
 * implements cancel_async, otherwise we do nothing and let the request
 * normally complete. In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    IO_CODE();
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order. */
    bs->active_flush_req = true;
    qemu_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here. Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly. */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster. */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();
    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
                                     unsigned int *nr_zones,
                                     BlockZoneDescriptor *zones)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                                   int64_t offset, int64_t len)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
                                     QEMUIOVector *qiov,
                                     BdrvRequestFlags flags)
{
    int ret;
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
3371
f4ec04ba 3372/* Helper that undoes bdrv_register_buf() when it fails partway through */
d9249c25
KW
3373static void GRAPH_RDLOCK
3374bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3375 BdrvChild *final_child)
f4ec04ba
SH
3376{
3377 BdrvChild *child;
3378
d9249c25
KW
3379 GLOBAL_STATE_CODE();
3380 assert_bdrv_graph_readable();
3381
f4ec04ba
SH
3382 QLIST_FOREACH(child, &bs->children, next) {
3383 if (child == final_child) {
3384 break;
3385 }
3386
3387 bdrv_unregister_buf(child->bs, host, size);
3388 }
3389
3390 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3391 bs->drv->bdrv_unregister_buf(bs, host, size);
3392 }
3393}
3394
bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }
    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }
    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}

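/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * caller showing that registration is all-or-nothing.  On failure,
 * bdrv_register_buf() has already rolled back the partially registered
 * subtree, so a successful registration pairs with exactly one
 * unregister call.
 */
static bool example_with_registered_buf(BlockDriverState *bs, void *host,
                                        size_t size, Error **errp)
{
    if (!bdrv_register_buf(bs, host, size, errp)) {
        return false; /* nothing to undo here */
    }
    /* ... run I/O that may use the pre-registered buffer ... */
    bdrv_unregister_buf(bs, host, size);
    return true;
}
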
static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;
    assert_bdrv_graph_readable();

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operations */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();

    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
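
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * helper offloading a copy between two attached children.  -ENOTSUP
 * means at least one driver lacks copy_range support (or a node is
 * encrypted); a caller would then typically fall back to a
 * bounce-buffered read/write loop.
 */
static int coroutine_fn GRAPH_RDLOCK
example_copy_block(BdrvChild *src, BdrvChild *dst, int64_t offset,
                   int64_t bytes)
{
    int ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);

    if (ret == -ENOTSUP) {
        /* ... fall back to bdrv_co_pread() + bdrv_co_pwrite() ... */
    }
    return ret;
}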

static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'. Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_co_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but bdrv_co_refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
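
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * helper growing an image without preallocation.  exact=false lets the
 * driver round the size up (e.g. to cluster granularity); zero-filling
 * of the new area is requested internally when a larger backing file
 * would otherwise shine through.
 */
static int coroutine_fn GRAPH_RDLOCK
example_grow_image(BdrvChild *child, int64_t new_size, Error **errp)
{
    return bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF, 0,
                            errp);
}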

void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}
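
/*
 * Illustrative sketch, not part of the upstream file: the three
 * snapshot-access helpers above share one pattern: -ENOMEDIUM without a
 * driver, -ENOTSUP without the callback, otherwise delegation inside an
 * in-flight section.  A hypothetical read through the snapshot view:
 */
static int coroutine_fn GRAPH_RDLOCK
example_read_snapshot(BdrvChild *child, int64_t offset, void *buf,
                      int64_t bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    return bdrv_co_preadv_snapshot(child, offset, bytes, &qiov, 0);
}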