/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, tran, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
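
/*
 * Illustrative usage sketch (not part of the original file): because the
 * copy-on-read flag is a reference count, independent users simply pair the
 * calls and need not coordinate with each other:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue reads that may trigger copy-on-read ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * The feature stays enabled until the last user has called the disable side.
 */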

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
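
/*
 * Illustrative usage sketch (not part of the original file): callers that
 * need a quiesced node bracket their work with the begin/end pair, e.g.:
 *
 *     bdrv_drained_begin(bs);
 *     ... wait out in-flight requests, modify the graph, etc. ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() further down in this file is exactly this pairing with no
 * work in between.
 */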

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the
 * BlockDriverState's AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  int64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
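
/*
 * Worked example (illustrative, not part of the original file): if req
 * covers [4096, 8192), i.e. req->overlap_offset == 4096 and
 * req->overlap_bytes == 4096, a request at offset == 8192 takes the first
 * early return (8192 >= 8192) and does not overlap, while a request at
 * offset == 8000 with bytes == 512 passes both checks and counts as an
 * overlap.
 */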

/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
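
/*
 * Worked example (illustrative, not part of the original file): with
 * req->offset == 1000, req->bytes == 100 and align == 512:
 *
 *     overlap_offset = 1000 & ~511               = 512
 *     overlap_bytes  = ROUND_UP(1100, 512) - 512 = 1024
 *
 * so the serialising window grows to the two aligned blocks [512, 1536)
 * that the unaligned request touches.
 */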

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
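
/*
 * Worked example (illustrative, not part of the original file): with a
 * cluster size of 64 KiB, offset == 70000 and bytes == 1000 round out to
 * cluster_offset == 65536 and cluster_bytes == 65536, since the request
 * fits entirely inside the second cluster.
 */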

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}

static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                                   QEMUIOVector *qiov, size_t qiov_offset,
                                   Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
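
/*
 * Illustrative call (not part of the original file): to zero out a device
 * while letting the driver unmap clusters where possible:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 */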

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
   -EINVAL      Invalid offset or number of bytes
   -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}
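
/*
 * Illustrative call (not part of the original file; assumes `child` is a
 * valid BdrvChild with write permission):
 *
 *     uint8_t sector[512] = { 0 };
 *     int ret = bdrv_pwrite(child, 0, sector, sizeof(sector));
 *     // ret == 512 on success, negative errno on failure
 */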

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
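
/*
 * Note (added for clarity, not part of the original file):
 * CoroutineIOCompletion bridges the callback-based AIO interface to
 * coroutines. The pattern used by bdrv_driver_preadv()/bdrv_driver_pwritev()
 * below is:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *     acb = drv->bdrv_aio_preadv(..., bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();   // resumed by bdrv_co_io_em_complete()
 *     ret = co.ret;
 */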

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~bs->supported_read_flags));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings.
 * @head is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
 * chunk around the tail, if a tail exists.
 *
 * @merge_reads is true for small requests, i.e. when
 * @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
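
/*
 * Worked example (illustrative, not part of the original file): with
 * align == 512, offset == 700 and bytes == 100:
 *
 *     head = 700 & 511         = 188
 *     tail = 512 - (800 & 511) = 224
 *     sum  = 188 + 100 + 224   = 512
 *
 * so buf_len == align == 512 and merge_reads is true: head and tail both
 * exist but fit in a single aligned block.
 */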
1670
1671static int bdrv_padding_rmw_read(BdrvChild *child,
1672 BdrvTrackedRequest *req,
1673 BdrvRequestPadding *pad,
1674 bool zero_middle)
1675{
1676 QEMUIOVector local_qiov;
1677 BlockDriverState *bs = child->bs;
1678 uint64_t align = bs->bl.request_alignment;
1679 int ret;
1680
1681 assert(req->serialising && pad->buf);
1682
1683 if (pad->head || pad->merge_reads) {
8b0c5d76 1684 int64_t bytes = pad->merge_reads ? pad->buf_len : align;
7a3f542f
VSO
1685
1686 qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
1687
1688 if (pad->head) {
1689 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1690 }
1691 if (pad->merge_reads && pad->tail) {
1692 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1693 }
1694 ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
65cd4424 1695 align, &local_qiov, 0, 0);
7a3f542f
VSO
1696 if (ret < 0) {
1697 return ret;
1698 }
1699 if (pad->head) {
1700 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1701 }
1702 if (pad->merge_reads && pad->tail) {
1703 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1704 }
1705
1706 if (pad->merge_reads) {
1707 goto zero_mem;
1708 }
1709 }
1710
1711 if (pad->tail) {
1712 qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
1713
1714 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1715 ret = bdrv_aligned_preadv(
1716 child, req,
1717 req->overlap_offset + req->overlap_bytes - align,
65cd4424 1718 align, align, &local_qiov, 0, 0);
1719 if (ret < 0) {
1720 return ret;
1721 }
1722 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1723 }
1724
1725zero_mem:
1726 if (zero_middle) {
1727 memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
1728 }
1729
1730 return 0;
1731}
1732
1733static void bdrv_padding_destroy(BdrvRequestPadding *pad)
1734{
1735 if (pad->buf) {
1736 qemu_vfree(pad->buf);
1737 qemu_iovec_destroy(&pad->local_qiov);
1738 }
98ca4549 1739 memset(pad, 0, sizeof(*pad));
1740}
1741
1742/*
1743 * bdrv_pad_request
1744 *
1745 * Exchange request parameters with padded request if needed. The RMW read of
1746 * the padding is not included; bdrv_padding_rmw_read() should be called
1747 * separately if needed.
1748 *
1749 * Request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
1750 * - on function start they represent original request
1751 * - on failure or when padding is not needed they are unchanged
1752 * - on success when padding is needed they represent padded request
61007b31 1753 */
1754static int bdrv_pad_request(BlockDriverState *bs,
1755 QEMUIOVector **qiov, size_t *qiov_offset,
37e9403e 1756 int64_t *offset, int64_t *bytes,
98ca4549 1757 BdrvRequestPadding *pad, bool *padded)
7a3f542f 1758{
1759 int ret;
1760
1761 bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);
1762
7a3f542f 1763 if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
1764 if (padded) {
1765 *padded = false;
1766 }
1767 return 0;
1768 }
1769
1770 ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
1771 *qiov, *qiov_offset, *bytes,
1772 pad->buf + pad->buf_len - pad->tail,
1773 pad->tail);
1774 if (ret < 0) {
1775 bdrv_padding_destroy(pad);
1776 return ret;
1777 }
1778 *bytes += pad->head + pad->tail;
1779 *offset -= pad->head;
1780 *qiov = &pad->local_qiov;
1acc3466 1781 *qiov_offset = 0;
1782 if (padded) {
1783 *padded = true;
1784 }
7a3f542f 1785
98ca4549 1786 return 0;
1787}
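
/*
 * Editor's note: the typical calling pattern, as used by
 * bdrv_co_preadv_part() below, is roughly:
 *
 *   BdrvRequestPadding pad;
 *   ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
 *                          NULL);
 *   if (ret < 0) {
 *       return ret;
 *   }
 *   ... issue the (possibly widened) aligned request ...
 *   bdrv_padding_destroy(&pad);
 *
 * On success the in-out parameters describe the padded request, so the rest
 * of the caller does not care whether padding actually happened.
 */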
1788
a03ef88f 1789int coroutine_fn bdrv_co_preadv(BdrvChild *child,
e9e52efd 1790 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
61007b31 1791 BdrvRequestFlags flags)
1792{
1793 return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1794}
1795
1796int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
37e9403e 1797 int64_t offset, int64_t bytes,
1798 QEMUIOVector *qiov, size_t qiov_offset,
1799 BdrvRequestFlags flags)
61007b31 1800{
a03ef88f 1801 BlockDriverState *bs = child->bs;
61007b31 1802 BdrvTrackedRequest req;
7a3f542f 1803 BdrvRequestPadding pad;
1804 int ret;
1805
37e9403e 1806 trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
61007b31 1807
1808 if (!bdrv_is_inserted(bs)) {
1809 return -ENOMEDIUM;
1810 }
1811
63f4ad11 1812 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1813 if (ret < 0) {
1814 return ret;
1815 }
1816
1817 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1818 /*
1819 * Aligning a zero-length request is nonsense. Even if the driver gives
1820 * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
1821 * we can't pass it to the driver due to request_alignment.
1822 *
1823 * Still, there is no reason to return an error if someone does an
1824 * unaligned zero-length read occasionally.
1825 */
1826 return 0;
1827 }
1828
1829 bdrv_inc_in_flight(bs);
1830
9568b511 1831 /* Don't do copy-on-read if we read data before a write operation */
d73415a3 1832 if (qatomic_read(&bs->copy_on_read)) {
1833 flags |= BDRV_REQ_COPY_ON_READ;
1834 }
1835
1836 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
1837 NULL);
1838 if (ret < 0) {
1839 return ret;
1840 }
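
/*
 * Editor's note: this error return leaves the bdrv_inc_in_flight() call
 * above unbalanced, since bdrv_dec_in_flight() is only reached on the
 * normal path below; this appears to be an in_flight leak, and later
 * upstream QEMU rebalances this error path.
 */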
61007b31 1841
ebde595c 1842 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1843 ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1844 bs->bl.request_alignment,
1acc3466 1845 qiov, qiov_offset, flags);
61007b31 1846 tracked_request_end(&req);
99723548 1847 bdrv_dec_in_flight(bs);
61007b31 1848
7a3f542f 1849 bdrv_padding_destroy(&pad);
1850
1851 return ret;
1852}
1853
d05aa8bb 1854static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
5ae07b14 1855 int64_t offset, int64_t bytes, BdrvRequestFlags flags)
1856{
1857 BlockDriver *drv = bs->drv;
1858 QEMUIOVector qiov;
0d93ed08 1859 void *buf = NULL;
61007b31 1860 int ret = 0;
465fe887 1861 bool need_flush = false;
1862 int head = 0;
1863 int tail = 0;
61007b31 1864
cf081fca 1865 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1866 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1867 bs->bl.request_alignment);
cb2e2878 1868 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
d05aa8bb 1869
1870 bdrv_check_request(offset, bytes, &error_abort);
1871
1872 if (!drv) {
1873 return -ENOMEDIUM;
1874 }
1875
1876 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1877 return -ENOTSUP;
1878 }
1879
1880 assert(alignment % bs->bl.request_alignment == 0);
1881 head = offset % alignment;
f5a5ca79 1882 tail = (offset + bytes) % alignment;
1883 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1884 assert(max_write_zeroes >= bs->bl.request_alignment);
61007b31 1885
f5a5ca79 1886 while (bytes > 0 && !ret) {
5ae07b14 1887 int64_t num = bytes;
1888
1889 /* Align request. Block drivers can expect the "bulk" of the request
1890 * to be aligned, and that unaligned requests do not cross cluster
1891 * boundaries.
61007b31 1892 */
443668ca 1893 if (head) {
1894 /* Make a small request up to the first aligned sector. For
1895 * convenience, limit this request to max_transfer even if
1896 * we don't need to fall back to writes. */
f5a5ca79 1897 num = MIN(MIN(bytes, max_transfer), alignment - head);
1898 head = (head + num) % alignment;
1899 assert(num < max_write_zeroes);
d05aa8bb 1900 } else if (tail && num > alignment) {
1901 /* Shorten the request to the last aligned sector. */
1902 num -= tail;
1903 }
1904
1905 /* limit request size */
1906 if (num > max_write_zeroes) {
1907 num = max_write_zeroes;
1908 }
1909
1910 ret = -ENOTSUP;
1911 /* First try the efficient write zeroes operation */
1912 if (drv->bdrv_co_pwrite_zeroes) {
1913 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1914 flags & bs->supported_zero_flags);
1915 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1916 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1917 need_flush = true;
1918 }
1919 } else {
1920 assert(!bs->supported_zero_flags);
1921 }
1922
294682cc 1923 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
61007b31 1924 /* Fall back to bounce buffer if write zeroes is unsupported */
1925 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1926
1927 if ((flags & BDRV_REQ_FUA) &&
1928 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1929 /* No need for bdrv_driver_pwrite() to do a fallback
1930 * flush on each chunk; use just one at the end */
1931 write_flags &= ~BDRV_REQ_FUA;
1932 need_flush = true;
1933 }
5def6b80 1934 num = MIN(num, max_transfer);
1935 if (buf == NULL) {
1936 buf = qemu_try_blockalign0(bs, num);
1937 if (buf == NULL) {
1938 ret = -ENOMEM;
1939 goto fail;
1940 }
61007b31 1941 }
0d93ed08 1942 qemu_iovec_init_buf(&qiov, buf, num);
61007b31 1943
ac850bf0 1944 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1945
1946 /* Keep the bounce buffer around if it is big enough for
1947 * all future requests.
1948 */
5def6b80 1949 if (num < max_transfer) {
1950 qemu_vfree(buf);
1951 buf = NULL;
1952 }
1953 }
1954
d05aa8bb 1955 offset += num;
f5a5ca79 1956 bytes -= num;
1957 }
1958
1959fail:
1960 if (ret == 0 && need_flush) {
1961 ret = bdrv_co_flush(bs);
1962 }
0d93ed08 1963 qemu_vfree(buf);
1964 return ret;
1965}
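
/*
 * Editor's note: a worked example of the alignment loop above, using
 * hypothetical numbers not taken from the original source. Assume
 * alignment = 4096, a large max_write_zeroes/max_transfer, offset = 4000
 * and bytes = 12000 (so head = 4000 and tail = 16000 % 4096 = 3712):
 *
 *   pass 1: num = MIN(12000, 4096 - 4000) = 96, reaching offset 4096;
 *   pass 2: num = 11904, shortened by the tail to 8192, the aligned bulk;
 *   pass 3: num = 3712, the unaligned tail, written on its own.
 *
 * Drivers therefore see at most one unaligned fragment at each end and an
 * aligned "bulk" in the middle, as promised by the comment in the loop.
 */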
1966
85fe2479 1967static inline int coroutine_fn
fcfd9ade 1968bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1969 BdrvTrackedRequest *req, int flags)
1970{
1971 BlockDriverState *bs = child->bs;
1972
1973 bdrv_check_request(offset, bytes, &error_abort);
1974
1975 if (bs->read_only) {
1976 return -EPERM;
1977 }
1978
1979 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1980 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1981 assert(!(flags & ~BDRV_REQ_MASK));
d1a764d1 1982 assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1983
1984 if (flags & BDRV_REQ_SERIALISING) {
1985 QEMU_LOCK_GUARD(&bs->reqs_lock);
1986
1987 tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1988
1989 if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1990 return -EBUSY;
1991 }
1992
1993 bdrv_wait_serialising_requests_locked(req);
1994 } else {
1995 bdrv_wait_serialising_requests(req);
1996 }
1997
1998 assert(req->overlap_offset <= offset);
1999 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
2000 assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
2001 child->perm & BLK_PERM_RESIZE);
85fe2479 2002
2003 switch (req->type) {
2004 case BDRV_TRACKED_WRITE:
2005 case BDRV_TRACKED_DISCARD:
2006 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
2007 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
2008 } else {
2009 assert(child->perm & BLK_PERM_WRITE);
2010 }
2011 return notifier_with_return_list_notify(&bs->before_write_notifiers,
2012 req);
2013 case BDRV_TRACKED_TRUNCATE:
2014 assert(child->perm & BLK_PERM_RESIZE);
2015 return 0;
2016 default:
2017 abort();
85fe2479 2018 }
2019}
2020
2021static inline void coroutine_fn
fcfd9ade 2022bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
2023 BdrvTrackedRequest *req, int ret)
2024{
2025 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
2026 BlockDriverState *bs = child->bs;
2027
2028 bdrv_check_request(offset, bytes, &error_abort);
2029
d73415a3 2030 qatomic_inc(&bs->write_gen);
85fe2479 2031
2032 /*
2033 * Discard cannot extend the image, but in error handling cases, such as
2034 * when reverting a qcow2 cluster allocation, the discarded range can extend
2035 * past the end of the image file, so we cannot assert about
2036 * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically a
2037 * discard request beyond EOF cannot expand the image anyway.
2038 */
7f8f03ef 2039 if (ret == 0 &&
2040 (req->type == BDRV_TRACKED_TRUNCATE ||
2041 end_sector > bs->total_sectors) &&
2042 req->type != BDRV_TRACKED_DISCARD) {
2043 bs->total_sectors = end_sector;
2044 bdrv_parent_cb_resize(bs);
2045 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
85fe2479 2046 }
2047 if (req->bytes) {
2048 switch (req->type) {
2049 case BDRV_TRACKED_WRITE:
2050 stat64_max(&bs->wr_highest_offset, offset + bytes);
2051 /* fall through, to set dirty bits */
2052 case BDRV_TRACKED_DISCARD:
2053 bdrv_set_dirty(bs, offset, bytes);
2054 break;
2055 default:
2056 break;
2057 }
2058 }
2059}
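
/*
 * Editor's note: bdrv_co_write_req_prepare() and bdrv_co_write_req_finish()
 * bracket every write-like request (write, discard, truncate). prepare()
 * checks permissions and waits for (or, with BDRV_REQ_NO_WAIT, fails on)
 * conflicting serialising requests before the driver is invoked; finish()
 * bumps the write generation, grows bs->total_sectors when a successful
 * request extended the image, and marks the affected range dirty. The dirty
 * marking happens even when the driver failed, since dirty bitmaps may
 * safely over-approximate the set of modified blocks.
 */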
2060
61007b31 2061/*
2062 * Forwards an already correctly aligned write request to the BlockDriver,
2063 * after possibly fragmenting it.
61007b31 2064 */
85c97ca7 2065static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
fcfd9ade 2066 BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
28c4da28 2067 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
61007b31 2068{
85c97ca7 2069 BlockDriverState *bs = child->bs;
61007b31 2070 BlockDriver *drv = bs->drv;
2071 int ret;
2072
fcfd9ade 2073 int64_t bytes_remaining = bytes;
04ed95f4 2074 int max_transfer;
61007b31 2075
2076 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2077
2078 if (!drv) {
2079 return -ENOMEDIUM;
2080 }
2081
2082 if (bdrv_has_readonly_bitmaps(bs)) {
2083 return -EPERM;
2084 }
2085
2086 assert(is_power_of_2(align));
2087 assert((offset & (align - 1)) == 0);
2088 assert((bytes & (align - 1)) == 0);
2089 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2090 align);
61007b31 2091
85fe2479 2092 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2093
2094 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
c1499a5e 2095 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
28c4da28 2096 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2097 flags |= BDRV_REQ_ZERO_WRITE;
2098 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2099 flags |= BDRV_REQ_MAY_UNMAP;
2100 }
2101 }
2102
2103 if (ret < 0) {
2104 /* Do nothing, write notifier decided to fail this request */
2105 } else if (flags & BDRV_REQ_ZERO_WRITE) {
9a4f4c31 2106 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
9896c876 2107 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
3ea1a091 2108 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2109 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2110 qiov, qiov_offset);
04ed95f4 2111 } else if (bytes <= max_transfer) {
9a4f4c31 2112 bdrv_debug_event(bs, BLKDBG_PWRITEV);
28c4da28 2113 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2114 } else {
2115 bdrv_debug_event(bs, BLKDBG_PWRITEV);
2116 while (bytes_remaining) {
2117 int num = MIN(bytes_remaining, max_transfer);
2118 int local_flags = flags;
2119
2120 assert(num);
2121 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2122 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2123 /* If FUA is going to be emulated by flush, we only
2124 * need to flush on the last iteration */
2125 local_flags &= ~BDRV_REQ_FUA;
2126 }
2127
2128 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2129 num, qiov,
2130 qiov_offset + bytes - bytes_remaining,
28c4da28 2131 local_flags);
2132 if (ret < 0) {
2133 break;
2134 }
2135 bytes_remaining -= num;
2136 }
61007b31 2137 }
9a4f4c31 2138 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
61007b31 2139
61007b31 2140 if (ret >= 0) {
04ed95f4 2141 ret = 0;
61007b31 2142 }
85fe2479 2143 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2144
2145 return ret;
2146}
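
/*
 * Editor's note: a sketch of the fragmentation above with hypothetical
 * numbers. If max_transfer is 2 MiB and a 5 MiB write arrives, the loop
 * issues three driver calls of 2 MiB, 2 MiB and 1 MiB. When BDRV_REQ_FUA
 * is requested but not natively supported, the flag is stripped from all
 * fragments except the last one, so the emulated flush happens only once.
 */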
2147
85c97ca7 2148static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
9eeb6dd1 2149 int64_t offset,
37e9403e 2150 int64_t bytes,
2151 BdrvRequestFlags flags,
2152 BdrvTrackedRequest *req)
2153{
85c97ca7 2154 BlockDriverState *bs = child->bs;
9eeb6dd1 2155 QEMUIOVector local_qiov;
a5b8dd2c 2156 uint64_t align = bs->bl.request_alignment;
9eeb6dd1 2157 int ret = 0;
2158 bool padding;
2159 BdrvRequestPadding pad;
9eeb6dd1 2160
2161 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2162 if (padding) {
8ac5aab2 2163 bdrv_make_request_serialising(req, align);
9eeb6dd1 2164
2165 bdrv_padding_rmw_read(child, req, &pad, true);
2166
2167 if (pad.head || pad.merge_reads) {
2168 int64_t aligned_offset = offset & ~(align - 1);
2169 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2170
2171 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2172 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
28c4da28 2173 align, &local_qiov, 0,
2174 flags & ~BDRV_REQ_ZERO_WRITE);
2175 if (ret < 0 || pad.merge_reads) {
2176 /* Error or all work is done */
2177 goto out;
2178 }
2179 offset += write_bytes - pad.head;
2180 bytes -= write_bytes - pad.head;
9eeb6dd1 2181 }
2182 }
2183
2184 assert(!bytes || (offset & (align - 1)) == 0);
2185 if (bytes >= align) {
2186 /* Write the aligned part in the middle. */
fcfd9ade 2187 int64_t aligned_bytes = bytes & ~(align - 1);
85c97ca7 2188 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
28c4da28 2189 NULL, 0, flags);
9eeb6dd1 2190 if (ret < 0) {
7a3f542f 2191 goto out;
2192 }
2193 bytes -= aligned_bytes;
2194 offset += aligned_bytes;
2195 }
2196
2197 assert(!bytes || (offset & (align - 1)) == 0);
2198 if (bytes) {
7a3f542f 2199 assert(align == pad.tail + bytes);
9eeb6dd1 2200
7a3f542f 2201 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
85c97ca7 2202 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2203 &local_qiov, 0,
2204 flags & ~BDRV_REQ_ZERO_WRITE);
9eeb6dd1 2205 }
9eeb6dd1 2206
2207out:
2208 bdrv_padding_destroy(&pad);
2209
2210 return ret;
2211}
2212
2213/*
2214 * Handle a write request in coroutine context
2215 */
a03ef88f 2216int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
e9e52efd 2217 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
61007b31 2218 BdrvRequestFlags flags)
2219{
2220 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2221}
2222
2223int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
37e9403e 2224 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
1acc3466 2225 BdrvRequestFlags flags)
61007b31 2226{
a03ef88f 2227 BlockDriverState *bs = child->bs;
61007b31 2228 BdrvTrackedRequest req;
a5b8dd2c 2229 uint64_t align = bs->bl.request_alignment;
7a3f542f 2230 BdrvRequestPadding pad;
61007b31 2231 int ret;
f0deecff 2232 bool padded = false;
61007b31 2233
37e9403e 2234 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
f42cf447 2235
f4dad307 2236 if (!bdrv_is_inserted(bs)) {
2237 return -ENOMEDIUM;
2238 }
61007b31 2239
63f4ad11 2240 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2241 if (ret < 0) {
2242 return ret;
2243 }
2244
2245 /* If the request is misaligned then we can't make it efficient */
2246 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2247 !QEMU_IS_ALIGNED(offset | bytes, align))
2248 {
2249 return -ENOTSUP;
2250 }
2251
2252 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2253 /*
2254 * Aligning a zero-length request is nonsense. Even if the driver gives
2255 * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
2256 * we can't pass it to the driver due to request_alignment.
2257 *
2258 * Still, there is no reason to return an error if someone does an
2259 * unaligned zero-length write occasionally.
2260 */
2261 return 0;
2262 }
2263
2264 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2265 /*
2266 * Pad request for following read-modify-write cycle.
2267 * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
2268 * alignment only if there is no ZERO flag.
2269 */
2270 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
2271 &padded);
2272 if (ret < 0) {
2273 return ret;
2274 }
2275 }
2276
99723548 2277 bdrv_inc_in_flight(bs);
ebde595c 2278 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
61007b31 2279
18a59f03 2280 if (flags & BDRV_REQ_ZERO_WRITE) {
f0deecff 2281 assert(!padded);
85c97ca7 2282 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2283 goto out;
2284 }
2285
2286 if (padded) {
2287 /*
2288 * Request was unaligned to request_alignment and therefore
2289 * padded. We are going to do read-modify-write, and must
2290 * serialize the request to prevent interactions of the
2291 * widened region with other transactions.
2292 */
8ac5aab2 2293 bdrv_make_request_serialising(&req, align);
7a3f542f 2294 bdrv_padding_rmw_read(child, &req, &pad, false);
2295 }
2296
85c97ca7 2297 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1acc3466 2298 qiov, qiov_offset, flags);
61007b31 2299
7a3f542f 2300 bdrv_padding_destroy(&pad);
61007b31 2301
2302out:
2303 tracked_request_end(&req);
99723548 2304 bdrv_dec_in_flight(bs);
7a3f542f 2305
2306 return ret;
2307}
2308
a03ef88f 2309int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
e9e52efd 2310 int64_t bytes, BdrvRequestFlags flags)
61007b31 2311{
f5a5ca79 2312 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
61007b31 2313
a03ef88f 2314 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2315 flags &= ~BDRV_REQ_MAY_UNMAP;
2316 }
61007b31 2317
f5a5ca79 2318 return bdrv_co_pwritev(child, offset, bytes, NULL,
74021bc4 2319 BDRV_REQ_ZERO_WRITE | flags);
2320}
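
/*
 * Editor's note: a minimal usage sketch with hypothetical values. To punch
 * a 1 MiB hole at offset 4 MiB, allowing the driver to unmap the range:
 *
 *   ret = bdrv_co_pwrite_zeroes(child, 4 * MiB, 1 * MiB,
 *                               BDRV_REQ_MAY_UNMAP);
 *
 * Note that BDRV_REQ_MAY_UNMAP is silently dropped above when the node was
 * opened without BDRV_O_UNMAP: the range still reads back as zeroes, but
 * the underlying space is not necessarily reclaimed.
 */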
2321
2322/*
2323 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
2324 */
2325int bdrv_flush_all(void)
2326{
2327 BdrvNextIterator it;
2328 BlockDriverState *bs = NULL;
2329 int result = 0;
2330
2331 /*
2332 * The bdrv queue is managed by record/replay;
2333 * creating a new flush request while stopping
2334 * the VM may break determinism.
2335 */
2336 if (replay_events_enabled()) {
2337 return result;
2338 }
2339
2340 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2341 AioContext *aio_context = bdrv_get_aio_context(bs);
2342 int ret;
2343
2344 aio_context_acquire(aio_context);
2345 ret = bdrv_flush(bs);
2346 if (ret < 0 && !result) {
2347 result = ret;
2348 }
2349 aio_context_release(aio_context);
2350 }
2351
2352 return result;
2353}
2354
2355/*
2356 * Returns the allocation status of the specified sectors.
2357 * Drivers not implementing the functionality are assumed to not support
2358 * backing files, hence all their sectors are reported as allocated.
2359 *
2360 * If 'want_zero' is true, the caller is querying for mapping
2361 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2362 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2363 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
c9ce8c4d 2364 *
2e8bc787 2365 * If 'offset' is beyond the end of the disk image the return value is
fb0d8654 2366 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
61007b31 2367 *
2e8bc787 2368 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2369 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2370 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
67a0fd2a 2371 *
2372 * 'pnum' is set to the number of bytes (including and immediately
2373 * following the specified offset) that are easily known to be in the
2374 * same allocated/unallocated state. Note that a second call starting
2375 * at the original offset plus returned pnum may have the same status.
2376 * The returned value is non-zero on success except at end-of-file.
2377 *
2378 * Returns negative errno on failure. Otherwise, if the
2379 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2380 * set to the host mapping and BDS corresponding to the guest offset.
61007b31 2381 */
2382static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2383 bool want_zero,
2384 int64_t offset, int64_t bytes,
2385 int64_t *pnum, int64_t *map,
2386 BlockDriverState **file)
2387{
2388 int64_t total_size;
2389 int64_t n; /* bytes */
efa6e2ed 2390 int ret;
2e8bc787 2391 int64_t local_map = 0;
298a1665 2392 BlockDriverState *local_file = NULL;
2393 int64_t aligned_offset, aligned_bytes;
2394 uint32_t align;
549ec0d9 2395 bool has_filtered_child;
61007b31 2396
2397 assert(pnum);
2398 *pnum = 0;
2399 total_size = bdrv_getlength(bs);
2400 if (total_size < 0) {
2401 ret = total_size;
298a1665 2402 goto early_out;
2403 }
2404
2e8bc787 2405 if (offset >= total_size) {
2406 ret = BDRV_BLOCK_EOF;
2407 goto early_out;
61007b31 2408 }
2e8bc787 2409 if (!bytes) {
2410 ret = 0;
2411 goto early_out;
9cdcfd9f 2412 }
61007b31 2413
2414 n = total_size - offset;
2415 if (n < bytes) {
2416 bytes = n;
2417 }
2418
2419 /* Must be non-NULL or bdrv_getlength() would have failed */
2420 assert(bs->drv);
2421 has_filtered_child = bdrv_filter_child(bs);
2422 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2e8bc787 2423 *pnum = bytes;
61007b31 2424 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2e8bc787 2425 if (offset + bytes == total_size) {
2426 ret |= BDRV_BLOCK_EOF;
2427 }
61007b31 2428 if (bs->drv->protocol_name) {
2429 ret |= BDRV_BLOCK_OFFSET_VALID;
2430 local_map = offset;
298a1665 2431 local_file = bs;
61007b31 2432 }
298a1665 2433 goto early_out;
2434 }
2435
99723548 2436 bdrv_inc_in_flight(bs);
2437
2438 /* Round out to request_alignment boundaries */
86a3d5c6 2439 align = bs->bl.request_alignment;
2440 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2441 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2442
2443 if (bs->drv->bdrv_co_block_status) {
2444 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2445 aligned_bytes, pnum, &local_map,
2446 &local_file);
2447 } else {
2448 /* Default code for filters */
2449
2450 local_file = bdrv_filter_bs(bs);
2451 assert(local_file);
2452
2453 *pnum = aligned_bytes;
2454 local_map = aligned_offset;
2455 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2456 }
2457 if (ret < 0) {
2458 *pnum = 0;
2459 goto out;
2460 }
2461
2e8bc787 2462 /*
636cb512 2463 * The driver's result must be a non-zero multiple of request_alignment.
efa6e2ed 2464 * Clamp pnum and adjust map to original request.
2e8bc787 2465 */
2466 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2467 align > offset - aligned_offset);
2468 if (ret & BDRV_BLOCK_RECURSE) {
2469 assert(ret & BDRV_BLOCK_DATA);
2470 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2471 assert(!(ret & BDRV_BLOCK_ZERO));
2472 }
2473
2474 *pnum -= offset - aligned_offset;
2475 if (*pnum > bytes) {
2476 *pnum = bytes;
61007b31 2477 }
2e8bc787 2478 if (ret & BDRV_BLOCK_OFFSET_VALID) {
efa6e2ed 2479 local_map += offset - aligned_offset;
2e8bc787 2480 }
2481
2482 if (ret & BDRV_BLOCK_RAW) {
298a1665 2483 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2484 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2485 *pnum, pnum, &local_map, &local_file);
99723548 2486 goto out;
2487 }
2488
2489 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2490 ret |= BDRV_BLOCK_ALLOCATED;
d40f4a56 2491 } else if (bs->drv->supports_backing) {
2492 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2493
2494 if (!cow_bs) {
2495 ret |= BDRV_BLOCK_ZERO;
2496 } else if (want_zero) {
cb850315 2497 int64_t size2 = bdrv_getlength(cow_bs);
c9ce8c4d 2498
2e8bc787 2499 if (size2 >= 0 && offset >= size2) {
2500 ret |= BDRV_BLOCK_ZERO;
2501 }
2502 }
2503 }
2504
2505 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2506 local_file && local_file != bs &&
2507 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2508 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2509 int64_t file_pnum;
2510 int ret2;
61007b31 2511
2512 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2513 *pnum, &file_pnum, NULL, NULL);
2514 if (ret2 >= 0) {
2515 /* Ignore errors. This is just providing extra information; it
2516 * is useful but not necessary.
2517 */
2518 if (ret2 & BDRV_BLOCK_EOF &&
2519 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2520 /*
2521 * It is valid for the format block driver to read
2522 * beyond the end of the underlying file's current
2523 * size; such areas read as zero.
2524 */
2525 ret |= BDRV_BLOCK_ZERO;
2526 } else {
2527 /* Limit request to the range reported by the protocol driver */
2528 *pnum = file_pnum;
2529 ret |= (ret2 & BDRV_BLOCK_ZERO);
2530 }
2531 }
2532 }
2533
2534out:
2535 bdrv_dec_in_flight(bs);
2e8bc787 2536 if (ret >= 0 && offset + *pnum == total_size) {
2537 ret |= BDRV_BLOCK_EOF;
2538 }
2539early_out:
2540 if (file) {
2541 *file = local_file;
2542 }
2543 if (map) {
2544 *map = local_map;
2545 }
2546 return ret;
2547}
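
/*
 * Editor's note: a sketch of how a hypothetical caller interprets the
 * result. After
 *
 *   ret = bdrv_co_block_status(bs, true, offset, bytes, &pnum, &map, &file);
 *
 * and a ret >= 0 check, the interesting bits are:
 *
 *   BDRV_BLOCK_ZERO         - the first pnum bytes read as zeroes;
 *   BDRV_BLOCK_ALLOCATED    - the range is allocated in this layer rather
 *                             than deferring to a backing file;
 *   BDRV_BLOCK_OFFSET_VALID - map/file describe the host mapping;
 *   BDRV_BLOCK_EOF          - the range reaches the end of the image.
 *
 * The status only covers the first pnum bytes, so callers loop, advancing
 * offset by pnum each time, to classify a larger range.
 */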
2548
21c2283e 2549int coroutine_fn
2550bdrv_co_common_block_status_above(BlockDriverState *bs,
2551 BlockDriverState *base,
3555a432 2552 bool include_base,
2553 bool want_zero,
2554 int64_t offset,
2555 int64_t bytes,
2556 int64_t *pnum,
2557 int64_t *map,
2558 BlockDriverState **file,
2559 int *depth)
ba3f0e25 2560{
67c095c8 2561 int ret;
ba3f0e25 2562 BlockDriverState *p;
67c095c8 2563 int64_t eof = 0;
a92b1b06 2564 int dummy;
ba3f0e25 2565
3555a432 2566 assert(!include_base || base); /* Can't include NULL base */
67c095c8 2567
2568 if (!depth) {
2569 depth = &dummy;
2570 }
2571 *depth = 0;
2572
2573 if (!include_base && bs == base) {
2574 *pnum = bytes;
2575 return 0;
2576 }
2577
67c095c8 2578 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
a92b1b06 2579 ++*depth;
3555a432 2580 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2581 return ret;
2582 }
2583
2584 if (ret & BDRV_BLOCK_EOF) {
2585 eof = offset + *pnum;
2586 }
2587
2588 assert(*pnum <= bytes);
2589 bytes = *pnum;
2590
3555a432 2591 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2592 p = bdrv_filter_or_cow_bs(p))
2593 {
2594 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2595 file);
a92b1b06 2596 ++*depth;
c61e684e 2597 if (ret < 0) {
67c095c8 2598 return ret;
c61e684e 2599 }
67c095c8 2600 if (*pnum == 0) {
c61e684e 2601 /*
2602 * The top layer deferred to this layer, and because this layer is
2603 * short, any zeroes that we synthesize beyond EOF behave as if they
2604 * were allocated at this layer.
2605 *
2606 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
2607 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2608 * below.
c61e684e 2609 */
67c095c8 2610 assert(ret & BDRV_BLOCK_EOF);
5b648c67 2611 *pnum = bytes;
2612 if (file) {
2613 *file = p;
2614 }
2615 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2616 break;
c61e684e 2617 }
2618 if (ret & BDRV_BLOCK_ALLOCATED) {
2619 /*
2620 * We've found the node and the status, we must break.
2621 *
2622 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
2623 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2624 * below.
2625 */
2626 ret &= ~BDRV_BLOCK_EOF;
2627 break;
2628 }
67c095c8 2629
2630 if (p == base) {
2631 assert(include_base);
2632 break;
2633 }
2634
2635 /*
2636 * OK, [offset, offset + *pnum) region is unallocated on this layer,
2637 * let's continue the diving.
2638 */
2639 assert(*pnum <= bytes);
2640 bytes = *pnum;
ba3f0e25 2641 }
2642
2643 if (offset + *pnum == eof) {
2644 ret |= BDRV_BLOCK_EOF;
2645 }
2646
2647 return ret;
2648}
2649
2650int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2651 int64_t offset, int64_t bytes, int64_t *pnum,
2652 int64_t *map, BlockDriverState **file)
c9ce8c4d 2653{
3555a432 2654 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
a92b1b06 2655 pnum, map, file, NULL);
2656}
2657
2658int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2659 int64_t *pnum, int64_t *map, BlockDriverState **file)
ba3f0e25 2660{
cb850315 2661 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
31826642 2662 offset, bytes, pnum, map, file);
2663}
2664
2665/*
2666 * Check @bs (and its backing chain) to see if the range defined
2667 * by @offset and @bytes is known to read as zeroes.
2668 * Return 1 if that is the case, 0 otherwise and -errno on error.
2669 * This test is meant to be fast rather than accurate so returning 0
2670 * does not guarantee non-zero data.
2671 */
2672int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2673 int64_t bytes)
2674{
2675 int ret;
2676 int64_t pnum = bytes;
2677
2678 if (!bytes) {
2679 return 1;
2680 }
2681
2682 ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
a92b1b06 2683 bytes, &pnum, NULL, NULL, NULL);
2684
2685 if (ret < 0) {
2686 return ret;
2687 }
2688
2689 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2690}
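
/*
 * Editor's note: because the test above is fast rather than accurate, a
 * return of 0 only means "not cheaply provable to be zero". A hypothetical
 * caller deciding whether a region may be skipped when copying would treat
 * 1 as "skip" and both 0 and negative values as "copy" (or as an error).
 */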
2691
2692int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2693 int64_t bytes, int64_t *pnum)
61007b31 2694{
2695 int ret;
2696 int64_t dummy;
d6a644bb 2697
2698 ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
2699 bytes, pnum ? pnum : &dummy, NULL,
a92b1b06 2700 NULL, NULL);
2701 if (ret < 0) {
2702 return ret;
2703 }
2704 return !!(ret & BDRV_BLOCK_ALLOCATED);
2705}
2706
2707/*
2708 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2709 *
2710 * Return a positive depth if (a prefix of) the given range is allocated
2711 * in any image between BASE and TOP (BASE is only included if include_base
2712 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
2713 * BASE can be NULL to check if the given offset is allocated in any
2714 * image of the chain. Return 0 otherwise, or negative errno on
2715 * failure.
61007b31 2716 *
2717 * 'pnum' is set to the number of bytes (including and immediately
2718 * following the specified offset) that are known to be in the same
2719 * allocated/unallocated state. Note that a subsequent call starting
2720 * at 'offset + *pnum' may return the same allocation status (in other
2721 * words, the result is not necessarily the maximum possible range);
2722 * but 'pnum' will only be 0 when end of file is reached.
2723 */
2724int bdrv_is_allocated_above(BlockDriverState *top,
2725 BlockDriverState *base,
2726 bool include_base, int64_t offset,
2727 int64_t bytes, int64_t *pnum)
61007b31 2728{
a92b1b06 2729 int depth;
7e7e5100 2730 int ret = bdrv_common_block_status_above(top, base, include_base, false,
2731 offset, bytes, pnum, NULL, NULL,
2732 &depth);
2733 if (ret < 0) {
2734 return ret;
2735 }
2736
2737 if (ret & BDRV_BLOCK_ALLOCATED) {
2738 return depth;
2739 }
2740 return 0;
2741}
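
/*
 * Editor's note: a worked example of the depth result for a hypothetical
 * chain base <- mid <- top. bdrv_is_allocated_above(top, base, false,
 * offset, bytes, &pnum) returns 1 if (a prefix of) the range is allocated
 * in top, 2 if it is only allocated in mid, and 0 if neither allocates it;
 * base itself is excluded because include_base is false.
 */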
2742
21c2283e 2743int coroutine_fn
b33b354f 2744bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2745{
2746 BlockDriver *drv = bs->drv;
c4db2e25 2747 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2748 int ret = -ENOTSUP;
2749
2750 if (!drv) {
2751 return -ENOMEDIUM;
2752 }
2753
dc88a467 2754 bdrv_inc_in_flight(bs);
1a8ae822 2755
2756 if (drv->bdrv_load_vmstate) {
2757 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
c4db2e25 2758 } else if (child_bs) {
b33b354f 2759 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2760 }
2761
dc88a467 2762 bdrv_dec_in_flight(bs);
b33b354f 2763
dc88a467 2764 return ret;
2765}
2766
2767int coroutine_fn
2768bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
61007b31 2769{
2770 BlockDriver *drv = bs->drv;
2771 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2772 int ret = -ENOTSUP;
61007b31 2773
2774 if (!drv) {
2775 return -ENOMEDIUM;
2776 }
2777
b33b354f 2778 bdrv_inc_in_flight(bs);
61007b31 2779
2780 if (drv->bdrv_save_vmstate) {
2781 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2782 } else if (child_bs) {
2783 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2784 }
2785
2786 bdrv_dec_in_flight(bs);
2787
2788 return ret;
2789}
2790
b33b354f 2791int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
61007b31 2792 int64_t pos, int size)
5ddda0b8 2793{
0d93ed08 2794 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
b33b354f 2795 int ret = bdrv_writev_vmstate(bs, &qiov, pos);
b433d942 2796
b33b354f 2797 return ret < 0 ? ret : size;
2798}
2799
2800int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2801 int64_t pos, int size)
61007b31 2802{
2803 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2804 int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2805
2806 return ret < 0 ? ret : size;
2807}
2808
2809/**************************************************************/
2810/* async I/Os */
2811
2812void bdrv_aio_cancel(BlockAIOCB *acb)
2813{
2814 qemu_aio_ref(acb);
2815 bdrv_aio_cancel_async(acb);
2816 while (acb->refcnt > 1) {
2817 if (acb->aiocb_info->get_aio_context) {
2818 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2819 } else if (acb->bs) {
2820 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2821 * assert that we're not using an I/O thread. Thread-safe
2822 * code should use bdrv_aio_cancel_async exclusively.
2823 */
2824 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2825 aio_poll(bdrv_get_aio_context(acb->bs), true);
2826 } else {
2827 abort();
2828 }
2829 }
2830 qemu_aio_unref(acb);
2831}
2832
2833/* Async version of aio cancel. The caller is not blocked if the acb implements
2834 * cancel_async; otherwise we do nothing and let the request complete normally.
2835 * In either case the completion callback must be called. */
2836void bdrv_aio_cancel_async(BlockAIOCB *acb)
2837{
2838 if (acb->aiocb_info->cancel_async) {
2839 acb->aiocb_info->cancel_async(acb);
2840 }
2841}
2842
2843/**************************************************************/
2844/* Coroutine block device emulation */
2845
2846int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2847{
2848 BdrvChild *primary_child = bdrv_primary_child(bs);
2849 BdrvChild *child;
2850 int current_gen;
2851 int ret = 0;
2852
2853 bdrv_inc_in_flight(bs);
61007b31 2854
e914404e 2855 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
1b6bc94d 2856 bdrv_is_sg(bs)) {
49ca6259 2857 goto early_exit;
2858 }
2859
3783fa3d 2860 qemu_co_mutex_lock(&bs->reqs_lock);
d73415a3 2861 current_gen = qatomic_read(&bs->write_gen);
2862
2863 /* Wait until any previous flushes are completed */
99723548 2864 while (bs->active_flush_req) {
3783fa3d 2865 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2866 }
2867
3783fa3d 2868 /* Flushes reach this point in nondecreasing current_gen order. */
99723548 2869 bs->active_flush_req = true;
3783fa3d 2870 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2871
2872 /* Write back all layers by calling one driver function */
2873 if (bs->drv->bdrv_co_flush) {
2874 ret = bs->drv->bdrv_co_flush(bs);
2875 goto out;
2876 }
2877
61007b31 2878 /* Write back cached data to the OS even with cache=unsafe */
883833e2 2879 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2880 if (bs->drv->bdrv_co_flush_to_os) {
2881 ret = bs->drv->bdrv_co_flush_to_os(bs);
2882 if (ret < 0) {
cdb5e315 2883 goto out;
2884 }
2885 }
2886
2887 /* But don't actually force it to the disk with cache=unsafe */
2888 if (bs->open_flags & BDRV_O_NO_FLUSH) {
883833e2 2889 goto flush_children;
2890 }
2891
2892 /* Check if we really need to flush anything */
2893 if (bs->flushed_gen == current_gen) {
883833e2 2894 goto flush_children;
2895 }
2896
883833e2 2897 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2898 if (!bs->drv) {
2899 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2900 * (even in case of apparent success) */
2901 ret = -ENOMEDIUM;
2902 goto out;
2903 }
2904 if (bs->drv->bdrv_co_flush_to_disk) {
2905 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2906 } else if (bs->drv->bdrv_aio_flush) {
2907 BlockAIOCB *acb;
2908 CoroutineIOCompletion co = {
2909 .coroutine = qemu_coroutine_self(),
2910 };
2911
2912 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2913 if (acb == NULL) {
2914 ret = -EIO;
2915 } else {
2916 qemu_coroutine_yield();
2917 ret = co.ret;
2918 }
2919 } else {
2920 /*
2921 * Some block drivers always operate in either writethrough or unsafe
2922 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
2923 * know how the server works (because the behaviour is hardcoded or
2924 * depends on server-side configuration), so we can't ensure that
2925 * everything is safe on disk. Returning an error doesn't work because
2926 * that would break guests even if the server operates in writethrough
2927 * mode.
2928 *
2929 * Let's hope the user knows what he's doing.
2930 */
2931 ret = 0;
2932 }
3ff2f67a 2933
61007b31 2934 if (ret < 0) {
cdb5e315 2935 goto out;
2936 }
2937
2938 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2939 * in the case of cache=unsafe, so there are no useless flushes.
2940 */
2941flush_children:
2942 ret = 0;
2943 QLIST_FOREACH(child, &bs->children, next) {
2944 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2945 int this_child_ret = bdrv_co_flush(child->bs);
2946 if (!ret) {
2947 ret = this_child_ret;
2948 }
2949 }
2950 }
2951
cdb5e315 2952out:
3ff2f67a 2953 /* Notify any pending flushes that we have completed */
2954 if (ret == 0) {
2955 bs->flushed_gen = current_gen;
2956 }
2957
2958 qemu_co_mutex_lock(&bs->reqs_lock);
99723548 2959 bs->active_flush_req = false;
2960 /* Return value is ignored - it's ok if wait queue is empty */
2961 qemu_co_queue_next(&bs->flush_queue);
3783fa3d 2962 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2963
49ca6259 2964early_exit:
99723548 2965 bdrv_dec_in_flight(bs);
cdb5e315 2966 return ret;
2967}
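
/*
 * Editor's note: the generation check above is what makes redundant flushes
 * cheap. Every completed write bumps bs->write_gen (see
 * bdrv_co_write_req_finish()), and a successful flush records the
 * generation it flushed in bs->flushed_gen. In a hypothetical sequence
 * "write A; flush; flush", the second flush finds
 * flushed_gen == current_gen and skips straight to flushing the children.
 */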
2968
2969int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2970 int64_t bytes)
61007b31 2971{
b1066c87 2972 BdrvTrackedRequest req;
9f1963b3 2973 int max_pdiscard, ret;
3482b9bc 2974 int head, tail, align;
0b9fd3f4 2975 BlockDriverState *bs = child->bs;
61007b31 2976
d93e5726 2977 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2978 return -ENOMEDIUM;
2979 }
2980
2981 if (bdrv_has_readonly_bitmaps(bs)) {
2982 return -EPERM;
2983 }
2984
69b55e03 2985 ret = bdrv_check_request(offset, bytes, NULL);
2986 if (ret < 0) {
2987 return ret;
2988 }
2989
2990 /* Do nothing if disabled. */
2991 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2992 return 0;
2993 }
2994
02aefe43 2995 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2996 return 0;
2997 }
2998
2999 /* Discard is advisory, but some devices track and coalesce
3000 * unaligned requests, so we must pass everything down rather than
3001 * round here. Still, most devices will just silently ignore
3002 * unaligned requests (by returning -ENOTSUP), so we must fragment
3003 * the request accordingly. */
02aefe43 3004 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3005 assert(align % bs->bl.request_alignment == 0);
3006 head = offset % align;
f5a5ca79 3007 tail = (offset + bytes) % align;
9f1963b3 3008
99723548 3009 bdrv_inc_in_flight(bs);
f5a5ca79 3010 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
50824995 3011
00695c27 3012 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3013 if (ret < 0) {
3014 goto out;
3015 }
3016
9f1963b3
EB
3017 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
3018 align);
3482b9bc 3019 assert(max_pdiscard >= bs->bl.request_alignment);
61007b31 3020
f5a5ca79 3021 while (bytes > 0) {
d93e5726 3022 int64_t num = bytes;
3023
3024 if (head) {
3025 /* Make small requests to get to alignment boundaries. */
f5a5ca79 3026 num = MIN(bytes, align - head);
3027 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3028 num %= bs->bl.request_alignment;
3029 }
3030 head = (head + num) % align;
3031 assert(num < max_pdiscard);
3032 } else if (tail) {
3033 if (num > align) {
3034 /* Shorten the request to the last aligned cluster. */
3035 num -= tail;
3036 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3037 tail > bs->bl.request_alignment) {
3038 tail %= bs->bl.request_alignment;
3039 num -= tail;
3040 }
3041 }
3042 /* limit request size */
3043 if (num > max_pdiscard) {
3044 num = max_pdiscard;
3045 }
61007b31 3046
3047 if (!bs->drv) {
3048 ret = -ENOMEDIUM;
3049 goto out;
3050 }
3051 if (bs->drv->bdrv_co_pdiscard) {
3052 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3053 } else {
3054 BlockAIOCB *acb;
3055 CoroutineIOCompletion co = {
3056 .coroutine = qemu_coroutine_self(),
3057 };
3058
3059 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3060 bdrv_co_io_em_complete, &co);
61007b31 3061 if (acb == NULL) {
3062 ret = -EIO;
3063 goto out;
3064 } else {
3065 qemu_coroutine_yield();
3066 ret = co.ret;
3067 }
3068 }
3069 if (ret && ret != -ENOTSUP) {
b1066c87 3070 goto out;
3071 }
3072
9f1963b3 3073 offset += num;
f5a5ca79 3074 bytes -= num;
61007b31 3075 }
3076 ret = 0;
3077out:
00695c27 3078 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
b1066c87 3079 tracked_request_end(&req);
99723548 3080 bdrv_dec_in_flight(bs);
b1066c87 3081 return ret;
3082}
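
/*
 * Editor's note: a worked example of the discard fragmentation above, with
 * hypothetical limits. Assume align = 64 KiB, request_alignment = 512, a
 * large max_pdiscard, and a discard of [100 KiB, 300 KiB), i.e. head =
 * 36 KiB and tail = 44 KiB. The loop issues three driver calls: the
 * unaligned 28 KiB up to the 128 KiB boundary, the aligned 128 KiB middle,
 * and the remaining 44 KiB tail. Everything is passed down because some
 * devices coalesce unaligned discards, even though others silently ignore
 * them.
 */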
3083
48af776a 3084int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3085{
3086 BlockDriver *drv = bs->drv;
3087 CoroutineIOCompletion co = {
3088 .coroutine = qemu_coroutine_self(),
3089 };
3090 BlockAIOCB *acb;
61007b31 3091
99723548 3092 bdrv_inc_in_flight(bs);
16a389dc 3093 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3094 co.ret = -ENOTSUP;
3095 goto out;
3096 }
3097
3098 if (drv->bdrv_co_ioctl) {
3099 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3100 } else {
3101 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3102 if (!acb) {
3103 co.ret = -ENOTSUP;
3104 goto out;
3105 }
3106 qemu_coroutine_yield();
5c5ae76a 3107 }
5c5ae76a 3108out:
99723548 3109 bdrv_dec_in_flight(bs);
3110 return co.ret;
3111}
3112
3113void *qemu_blockalign(BlockDriverState *bs, size_t size)
3114{
3115 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3116}
3117
3118void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3119{
3120 return memset(qemu_blockalign(bs, size), 0, size);
3121}
3122
3123void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3124{
3125 size_t align = bdrv_opt_mem_align(bs);
3126
3127 /* Ensure that NULL is never returned on success */
3128 assert(align > 0);
3129 if (size == 0) {
3130 size = align;
3131 }
3132
3133 return qemu_try_memalign(align, size);
3134}
3135
3136void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3137{
3138 void *mem = qemu_try_blockalign(bs, size);
3139
3140 if (mem) {
3141 memset(mem, 0, size);
3142 }
3143
3144 return mem;
3145}
3146
3147/*
3148 * Check if all memory in this vector is sector aligned.
3149 */
3150bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
3151{
3152 int i;
4196d2f0 3153 size_t alignment = bdrv_min_mem_align(bs);
3154
3155 for (i = 0; i < qiov->niov; i++) {
3156 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
3157 return false;
3158 }
3159 if (qiov->iov[i].iov_len % alignment) {
3160 return false;
3161 }
3162 }
3163
3164 return true;
3165}
3166
3167void bdrv_add_before_write_notifier(BlockDriverState *bs,
3168 NotifierWithReturn *notifier)
3169{
3170 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
3171}
3172
3173void bdrv_io_plug(BlockDriverState *bs)
3174{
3175 BdrvChild *child;
3176
3177 QLIST_FOREACH(child, &bs->children, next) {
3178 bdrv_io_plug(child->bs);
3179 }
3180
d73415a3 3181 if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
3182 BlockDriver *drv = bs->drv;
3183 if (drv && drv->bdrv_io_plug) {
3184 drv->bdrv_io_plug(bs);
3185 }
3186 }
3187}
3188
3189void bdrv_io_unplug(BlockDriverState *bs)
3190{
3191 BdrvChild *child;
3192
3193 assert(bs->io_plugged);
d73415a3 3194 if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
3195 BlockDriver *drv = bs->drv;
3196 if (drv && drv->bdrv_io_unplug) {
3197 drv->bdrv_io_unplug(bs);
3198 }
3199 }
3200
3201 QLIST_FOREACH(child, &bs->children, next) {
3202 bdrv_io_unplug(child->bs);
3203 }
3204}
3205
3206void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
3207{
3208 BdrvChild *child;
3209
3210 if (bs->drv && bs->drv->bdrv_register_buf) {
3211 bs->drv->bdrv_register_buf(bs, host, size);
3212 }
3213 QLIST_FOREACH(child, &bs->children, next) {
3214 bdrv_register_buf(child->bs, host, size);
3215 }
3216}
3217
3218void bdrv_unregister_buf(BlockDriverState *bs, void *host)
3219{
3220 BdrvChild *child;
3221
3222 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3223 bs->drv->bdrv_unregister_buf(bs, host);
3224 }
3225 QLIST_FOREACH(child, &bs->children, next) {
3226 bdrv_unregister_buf(child->bs, host);
3227 }
3228}
fcc67678 3229
67b51fb9 3230static int coroutine_fn bdrv_co_copy_range_internal(
3231 BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3232 int64_t dst_offset, int64_t bytes,
3233 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3234 bool recurse_src)
fcc67678 3235{
999658a0 3236 BdrvTrackedRequest req;
3237 int ret;
3238
3239 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3240 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3241 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3242
f4dad307 3243 if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
3244 return -ENOMEDIUM;
3245 }
63f4ad11 3246 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3247 if (ret) {
3248 return ret;
3249 }
3250 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3251 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3252 }
3253
f4dad307 3254 if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
3255 return -ENOMEDIUM;
3256 }
63f4ad11 3257 ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3258 if (ret) {
3259 return ret;
3260 }
3261
3262 if (!src->bs->drv->bdrv_co_copy_range_from
3263 || !dst->bs->drv->bdrv_co_copy_range_to
3264 || src->bs->encrypted || dst->bs->encrypted) {
3265 return -ENOTSUP;
3266 }
37aec7d7 3267
fcc67678 3268 if (recurse_src) {
3269 bdrv_inc_in_flight(src->bs);
3270 tracked_request_begin(&req, src->bs, src_offset, bytes,
3271 BDRV_TRACKED_READ);
3272
3273 /* BDRV_REQ_SERIALISING is only for write operation */
3274 assert(!(read_flags & BDRV_REQ_SERIALISING));
c53cb427 3275 bdrv_wait_serialising_requests(&req);
999658a0 3276
3277 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3278 src, src_offset,
3279 dst, dst_offset,
3280 bytes,
3281 read_flags, write_flags);
3282
3283 tracked_request_end(&req);
3284 bdrv_dec_in_flight(src->bs);
fcc67678 3285 } else {
3286 bdrv_inc_in_flight(dst->bs);
3287 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3288 BDRV_TRACKED_WRITE);
3289 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3290 write_flags);
3291 if (!ret) {
3292 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3293 src, src_offset,
3294 dst, dst_offset,
3295 bytes,
3296 read_flags, write_flags);
3297 }
3298 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3299 tracked_request_end(&req);
3300 bdrv_dec_in_flight(dst->bs);
fcc67678 3301 }
999658a0 3302
37aec7d7 3303 return ret;
3304}
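
/*
 * Editor's note: the recurse_src flag above selects which side of the copy
 * is tracked: the source node for bdrv_co_copy_range_from() (a read-side
 * tracked request) and the destination node for bdrv_co_copy_range_to()
 * (a write-side request with the full prepare/finish bracket). The two
 * public wrappers below simply call this helper with recurse_src set
 * accordingly.
 */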
3305
3306/* Copy range from @src to @dst.
3307 *
3308 * See the comment of bdrv_co_copy_range for the parameter and return value
3309 * semantics. */
3310int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3311 BdrvChild *dst, int64_t dst_offset,
3312 int64_t bytes,
3313 BdrvRequestFlags read_flags,
3314 BdrvRequestFlags write_flags)
fcc67678 3315{
3316 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3317 read_flags, write_flags);
fcc67678 3318 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3319 bytes, read_flags, write_flags, true);
3320}
3321
3322/* Copy range from @src to @dst.
3323 *
3324 * See the comment of bdrv_co_copy_range for the parameter and return value
3325 * semantics. */
3326int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3327 BdrvChild *dst, int64_t dst_offset,
3328 int64_t bytes,
3329 BdrvRequestFlags read_flags,
3330 BdrvRequestFlags write_flags)
fcc67678 3331{
3332 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3333 read_flags, write_flags);
fcc67678 3334 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3335 bytes, read_flags, write_flags, false);
3336}
3337
3338int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3339 BdrvChild *dst, int64_t dst_offset,
3340 int64_t bytes, BdrvRequestFlags read_flags,
67b51fb9 3341 BdrvRequestFlags write_flags)
fcc67678 3342{
3343 return bdrv_co_copy_range_from(src, src_offset,
3344 dst, dst_offset,
67b51fb9 3345 bytes, read_flags, write_flags);
fcc67678 3346}
3347
3348static void bdrv_parent_cb_resize(BlockDriverState *bs)
3349{
3350 BdrvChild *c;
3351 QLIST_FOREACH(c, &bs->parents, next_parent) {
3352 if (c->klass->resize) {
3353 c->klass->resize(c);
3354 }
3355 }
3356}
3357
3358/**
3359 * Truncate file to 'offset' bytes (needed only for file protocols)
3360 *
3361 * If 'exact' is true, the file must be resized to exactly the given
3362 * 'offset'. Otherwise, it is sufficient for the node to be at least
3363 * 'offset' bytes in length.
3d9f2d2a 3364 */
c80d8b06 3365int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3366 PreallocMode prealloc, BdrvRequestFlags flags,
3367 Error **errp)
3368{
3369 BlockDriverState *bs = child->bs;
23b93525 3370 BdrvChild *filtered, *backing;
3d9f2d2a 3371 BlockDriver *drv = bs->drv;
3372 BdrvTrackedRequest req;
3373 int64_t old_size, new_bytes;
3374 int ret;
3375
3376
3377 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3378 if (!drv) {
3379 error_setg(errp, "No medium inserted");
3380 return -ENOMEDIUM;
3381 }
3382 if (offset < 0) {
3383 error_setg(errp, "Image size cannot be negative");
3384 return -EINVAL;
3385 }
3386
69b55e03 3387 ret = bdrv_check_request(offset, 0, errp);
8b117001 3388 if (ret < 0) {
3389 return ret;
3390 }
3391
3392 old_size = bdrv_getlength(bs);
3393 if (old_size < 0) {
3394 error_setg_errno(errp, -old_size, "Failed to get old image size");
3395 return old_size;
3396 }
3397
3398 if (offset > old_size) {
3399 new_bytes = offset - old_size;
3400 } else {
3401 new_bytes = 0;
3402 }
3403
3d9f2d2a 3404 bdrv_inc_in_flight(bs);
3405 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3406 BDRV_TRACKED_TRUNCATE);
3407
3408 /* If we are growing the image and potentially using preallocation for the
3409 * new area, we need to make sure that no write requests are made to it
3410 * concurrently or they might be overwritten by preallocation. */
3411 if (new_bytes) {
8ac5aab2 3412 bdrv_make_request_serialising(&req, 1);
3413 }
3414 if (bs->read_only) {
3415 error_setg(errp, "Image is read-only");
3416 ret = -EACCES;
3417 goto out;
3418 }
3419 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3420 0);
3421 if (ret < 0) {
3422 error_setg_errno(errp, -ret,
3423 "Failed to prepare request for truncation");
3424 goto out;
1bc5f09f 3425 }
3d9f2d2a 3426
93393e69 3427 filtered = bdrv_filter_child(bs);
23b93525 3428 backing = bdrv_cow_child(bs);
93393e69 3429
3430 /*
3431 * If the image has a backing file that is large enough that it would
3432 * provide data for the new area, we cannot leave it unallocated because
3433 * then the backing file content would become visible. Instead, zero-fill
3434 * the new area.
3435 *
3436 * Note that if the image has a backing file, but was opened without the
3437 * backing file, taking care of keeping things consistent with that backing
3438 * file is the user's responsibility.
3439 */
23b93525 3440 if (new_bytes && backing) {
3441 int64_t backing_len;
3442
23b93525 3443 backing_len = bdrv_getlength(backing->bs);
3444 if (backing_len < 0) {
3445 ret = backing_len;
3446 error_setg_errno(errp, -ret, "Could not get backing file size");
3447 goto out;
3448 }
3449
3450 if (backing_len > old_size) {
3451 flags |= BDRV_REQ_ZERO_WRITE;
3452 }
3453 }
3454
6b7e8f8b 3455 if (drv->bdrv_co_truncate) {
3456 if (flags & ~bs->supported_truncate_flags) {
3457 error_setg(errp, "Block driver does not support requested flags");
3458 ret = -ENOTSUP;
3459 goto out;
3460 }
3461 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3462 } else if (filtered) {
3463 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
6b7e8f8b 3464 } else {
3465 error_setg(errp, "Image format driver does not support resize");
3466 ret = -ENOTSUP;
3467 goto out;
3468 }
3469 if (ret < 0) {
3470 goto out;
3471 }
6b7e8f8b 3472
3473 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3474 if (ret < 0) {
3475 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3476 } else {
3477 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3478 }
3479 /* It's possible that truncation succeeded but refresh_total_sectors
3480 * failed, but the latter doesn't affect how we should finish the request.
3481 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3482 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3483
3484out:
1bc5f09f 3485 tracked_request_end(&req);
3d9f2d2a 3486 bdrv_dec_in_flight(bs);
1bc5f09f 3487
3488 return ret;
3489}
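
/*
 * Editor's note: a hypothetical example of the backing-file interaction
 * above. With an old size of 1 GiB, a 2 GiB backing file and a truncate to
 * 1.5 GiB, leaving [1 GiB, 1.5 GiB) unallocated would suddenly expose the
 * backing file's data there, so BDRV_REQ_ZERO_WRITE is forced and the new
 * area is explicitly zero-filled instead.
 */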
3490
3491void bdrv_cancel_in_flight(BlockDriverState *bs)
3492{
3493 if (!bs || !bs->drv) {
3494 return;
3495 }
3496
3497 if (bs->drv->bdrv_cancel_in_flight) {
3498 bs->drv->bdrv_cancel_in_flight(bs);
3499 }
3500}