/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    throttle_group_config(bs, cfg);
}
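
/* Temporarily disable throttling for a BDS.  Calls nest: each *_begin bumps
 * io_limits_disabled (restarting any queued throttled requests on the first
 * transition), and the matching *_end drops the counter again. */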
void bdrv_no_throttling_begin(BlockDriverState *bs)
{
    if (!bs->blk) {
        return;
    }

    if (blk_get_public(bs->blk)->io_limits_disabled++ == 0) {
        throttle_group_restart_blk(bs->blk);
    }
}

void bdrv_no_throttling_end(BlockDriverState *bs)
{
    BlockBackendPublic *blkp;

    if (!bs->blk) {
        return;
    }

    blkp = blk_get_public(bs->blk);
    assert(blkp->io_limits_disabled);
    --blkp->io_limits_disabled;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    assert(blk_get_public(bs->blk)->throttle_state);
    bdrv_no_throttling_begin(bs);
    throttle_group_unregister_blk(bs->blk);
    bdrv_no_throttling_end(bs);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    BlockBackendPublic *blkp = blk_get_public(bs->blk);

    assert(!blkp->throttle_state);
    throttle_group_register_blk(bs->blk, group);
}
void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!blk_get_public(bs->blk)->throttle_state) {
        bdrv_io_limits_enable(bs, group);
        return;
    }

    /* this bs is already part of the same group as the one we want */
    if (!g_strcmp0(throttle_group_get_name(bs->blk), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}
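
/* Recompute bs->bl: start from zeroed limits, inherit defaults from bs->file,
 * merge in bs->backing, and finally let the format driver override the result
 * through its bdrv_refresh_limits callback. */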
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN_NON_ZERO(bs->bl.max_iov,
                         bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;
    BlockBackendPublic *blkp = bs->blk ? blk_get_public(bs->blk) : NULL;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[0])) {
        return true;
    }
    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[1])) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}
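
/* Context for bdrv_co_yield_to_drain(): the draining coroutine yields and a
 * bottom half runs bdrv_drain_poll() on its behalf before re-entering it. */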
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    bdrv_no_throttling_begin(bs);
    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_co_yield_to_drain(bs);
    bdrv_io_unplugged_end(bs);
    bdrv_no_throttling_end(bs);
}
void bdrv_drain(BlockDriverState *bs)
{
    bdrv_no_throttling_begin(bs);
    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
    bdrv_no_throttling_end(bs);
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_no_throttling_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_no_throttling_end(bs);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
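
/* Widen the request's overlap window to 'align' boundaries so that any
 * overlapping request is forced to wait in wait_serialising_requests(). */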
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
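
/* rwco.ret stays NOT_DONE until bdrv_rw_co_entry() finishes; outside of
 * coroutine context the caller spins in aio_poll() until it changes. */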
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
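
/* Dispatch a read to the driver, preferring the byte-based .bdrv_co_preadv
 * interface and falling back to .bdrv_co_readv or emulation on top of
 * .bdrv_aio_readv for drivers that only provide the older callbacks. */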
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
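
/* Write-side counterpart of bdrv_driver_preadv().  If the driver cannot
 * handle BDRV_REQ_FUA itself, the flag is emulated with a full flush once
 * the write has completed. */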
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                             cluster_nb_sectors * BDRV_SECTOR_SIZE,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                                  cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = bdrv_driver_preadv(bs, offset,
                                     max_nb_sectors * BDRV_SECTOR_SIZE,
                                     &local_qiov, 0);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
        throttle_group_co_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_SERIALISING);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
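
/* Cap on the bounce-buffer fallback used when a driver has no efficient
 * write-zeroes implementation and the zeroes must be written explicitly. */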
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
                                            flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
                                      num * BDRV_SECTOR_SIZE, &qiov,
                                      write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
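
/* Zero-write path for unaligned requests: read-modify-write the unaligned
 * head and tail around the zeroed region, then write the aligned middle as
 * a plain zero write. */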
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
        throttle_group_co_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);

    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
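
/* State shared between bdrv_get_block_status_above() and its coroutine
 * entry point; 'done' tells the synchronous caller when to stop polling. */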
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
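
/* Like bdrv_co_get_block_status(), but walks the backing chain from bs down
 * to (but not including) base, stopping at the first layer that has the
 * requested sectors allocated. */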
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}
int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
            bs->bl.max_iov) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    if (bs->blk) {
        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
                              num_reqs - outidx - 1);
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH* bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}
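
/* need_bh protects against completing an AIOCB before bdrv_co_aio_rw_vector()
 * has returned it to the caller: while it is set, completion is deferred to a
 * bottom half scheduled by bdrv_co_maybe_schedule_bh(). */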
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
2279 static void coroutine_fn
bdrv_aio_discard_co_entry(void *opaque
)
2281 BlockAIOCBCoroutine
*acb
= opaque
;
2282 BlockDriverState
*bs
= acb
->common
.bs
;
2284 acb
->req
.error
= bdrv_co_discard(bs
, acb
->req
.sector
, acb
->req
.nb_sectors
);
2285 bdrv_co_complete(acb
);
BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

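/*
 * AIOCB allocation and reference counting: qemu_aio_get() returns a new
 * AIOCB with a reference count of one; qemu_aio_ref()/qemu_aio_unref()
 * adjust the count, and the structure is freed when the last reference is
 * dropped.
 */
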
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

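/*
 * bdrv_flush() follows the pattern used by the synchronous wrappers in this
 * file: call the coroutine entry point directly when already in coroutine
 * context, otherwise spawn a coroutine and poll the BlockDriverState's
 * AioContext until the shared RwCo structure reports completion.
 */
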
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct {
    CoroutineIOCompletion *co;
    QEMUBH *bh;
} BdrvIoctlCompletionData;

static void bdrv_ioctl_bh_cb(void *opaque)
{
    BdrvIoctlCompletionData *data = opaque;

    bdrv_co_io_em_complete(data->co, -ENOTSUP);
    qemu_bh_delete(data->bh);
}

static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
        data->bh = aio_bh_new(bdrv_get_aio_context(bs),
                              bdrv_ioctl_bh_cb, data);
        data->co = &co;
        qemu_bh_schedule(data->bh);
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

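/*
 * If the driver has no bdrv_aio_ioctl callback, the request fails with
 * -ENOTSUP immediately; if the driver returns a NULL AIOCB, a bottom half
 * (bdrv_ioctl_bh_cb) completes the request with -ENOTSUP so that the
 * coroutine still goes through the usual yield/re-enter path.
 */
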
typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;

    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

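/*
 * Typical use, sketched here as an assumption about callers outside this
 * section: request paths that need aligned memory check the vector first and
 * otherwise fall back to a bounce buffer, roughly
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         buf = qemu_blockalign(bs, qiov->size);
 *         qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
 *         ... submit the I/O using buf instead of qiov ...
 *     }
 */
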
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

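/*
 * bdrv_io_plug()/bdrv_io_unplug() let callers batch several requests before
 * the driver submits them, while bdrv_io_unplugged_begin()/end() temporarily
 * force submission even inside a plugged section; io_plug_disabled counts
 * how many such sections are active and suppresses the driver-level plug.
 */
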
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_drain(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    aio_enable_external(bdrv_get_aio_context(bs));
}
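
/*
 * Usage sketch, an assumption about callers rather than code from this file:
 * a drained section brackets operations that must not race with new external
 * I/O, e.g.
 *
 *     bdrv_drained_begin(bs);
 *     ... no new external requests are accepted; in-flight ones complete ...
 *     bdrv_drained_end(bs);
 */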