/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov = MIN_NON_ZERO(bs->bl.max_iov,
                                      bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;
    BlockBackendPublic *blkp = bs->blk ? blk_get_public(bs->blk) : NULL;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[0])) {
        return true;
    }
    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[1])) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    bdrv_parent_drained_begin(bs);
    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_co_yield_to_drain(bs);
    bdrv_io_unplugged_end(bs);
    bdrv_parent_drained_end(bs);
}
void bdrv_drain(BlockDriverState *bs)
{
    bdrv_parent_drained_begin(bs);
    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
    bdrv_parent_drained_end(bs);
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
545 static int bdrv_prwv_co(BlockDriverState
*bs
, int64_t offset
,
546 QEMUIOVector
*qiov
, bool is_write
,
547 BdrvRequestFlags flags
)
554 .is_write
= is_write
,
559 if (qemu_in_coroutine()) {
560 /* Fast-path if already in coroutine context */
561 bdrv_rw_co_entry(&rwco
);
563 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
565 co
= qemu_coroutine_create(bdrv_rw_co_entry
);
566 qemu_coroutine_enter(co
, &rwco
);
567 while (rwco
.ret
== NOT_DONE
) {
568 aio_poll(aio_context
, true);
575 * Process a synchronous request using coroutines
577 static int bdrv_rw_co(BlockDriverState
*bs
, int64_t sector_num
, uint8_t *buf
,
578 int nb_sectors
, bool is_write
, BdrvRequestFlags flags
)
582 .iov_base
= (void *)buf
,
583 .iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
,
586 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
590 qemu_iovec_init_external(&qiov
, &iov
, 1);
591 return bdrv_prwv_co(bs
, sector_num
<< BDRV_SECTOR_BITS
,
592 &qiov
, is_write
, flags
);
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                             cluster_nb_sectors * BDRV_SECTOR_SIZE,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                                  cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = bdrv_driver_preadv(bs, offset,
                                     max_nb_sectors * BDRV_SECTOR_SIZE,
                                     &local_qiov, 0);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_SERIALISING);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
                                            flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
                                      num * BDRV_SECTOR_SIZE, &qiov,
                                      write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
bdrv_co_get_block_status_above(BlockDriverState
*bs
,
1647 BlockDriverState
*base
,
1651 BlockDriverState
**file
)
1653 BlockDriverState
*p
;
1657 for (p
= bs
; p
!= base
; p
= backing_bs(p
)) {
1658 ret
= bdrv_co_get_block_status(p
, sector_num
, nb_sectors
, pnum
, file
);
1659 if (ret
< 0 || ret
& BDRV_BLOCK_ALLOCATED
) {
1662 /* [sector_num, pnum] unallocated on this layer, which could be only
1663 * the first part of [sector_num, nb_sectors]. */
1664 nb_sectors
= MIN(nb_sectors
, *pnum
);
/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}
int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    }

    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
{
1894 BlockCompletionFunc
*cb
;
1896 QEMUIOVector
*free_qiov
;
1900 static void multiwrite_user_cb(MultiwriteCB
*mcb
)
1904 for (i
= 0; i
< mcb
->num_callbacks
; i
++) {
1905 mcb
->callbacks
[i
].cb(mcb
->callbacks
[i
].opaque
, mcb
->error
);
1906 if (mcb
->callbacks
[i
].free_qiov
) {
1907 qemu_iovec_destroy(mcb
->callbacks
[i
].free_qiov
);
1909 g_free(mcb
->callbacks
[i
].free_qiov
);
1913 static void multiwrite_cb(void *opaque
, int ret
)
1915 MultiwriteCB
*mcb
= opaque
;
1917 trace_multiwrite_cb(mcb
, ret
);
1919 if (ret
< 0 && !mcb
->error
) {
1923 mcb
->num_requests
--;
1924 if (mcb
->num_requests
== 0) {
1925 multiwrite_user_cb(mcb
);
1930 static int multiwrite_req_compare(const void *a
, const void *b
)
1932 const BlockRequest
*req1
= a
, *req2
= b
;
1935 * Note that we can't simply subtract req2->sector from req1->sector
1936 * here as that could overflow the return value.
1938 if (req1
->sector
> req2
->sector
) {
1940 } else if (req1
->sector
< req2
->sector
) {
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
            bs->bl.max_iov) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                    reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    if (bs->blk) {
        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
                              num_reqs - outidx - 1);
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}
/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}
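/*
 * Synchronous wrapper around bdrv_co_flush().  When already running in
 * coroutine context the entry point is called directly; otherwise a new
 * coroutine is spawned and the AioContext is polled until it reports
 * completion through rwco.ret.
 */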
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
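/*
 * Discard (unmap) a range of sectors.  The request is split into pieces
 * that respect the driver's discard_alignment and max_discard limits; each
 * piece goes through either the coroutine or the legacy AIO discard
 * callback.  -ENOTSUP from the driver is ignored, so a discard that cannot
 * be honoured degrades to a no-op rather than an error.
 */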
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}
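/*
 * Synchronous wrapper around bdrv_co_discard(), following the same
 * fast-path/aio_poll() pattern as bdrv_flush() above.
 */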
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
typedef struct {
    CoroutineIOCompletion *co;
    QEMUBH *bh;
} BdrvIoctlCompletionData;

static void bdrv_ioctl_bh_cb(void *opaque)
{
    BdrvIoctlCompletionData *data = opaque;

    bdrv_co_io_em_complete(data->co, -ENOTSUP);
    qemu_bh_delete(data->bh);
}
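/*
 * Issue an ioctl through the driver's bdrv_aio_ioctl callback and wait for
 * it in coroutine context.  If the driver does not even return an AIOCB, a
 * bottom half (bdrv_ioctl_bh_cb above) is scheduled to complete the request
 * with -ENOTSUP, so the qemu_coroutine_yield() below is always woken up.
 */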
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
        data->bh = aio_bh_new(bdrv_get_aio_context(bs),
                              bdrv_ioctl_bh_cb, data);
        data->co = &co;
        qemu_bh_schedule(data->bh);
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}
typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;
static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}
/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}
static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}
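/*
 * Asynchronous counterpart of bdrv_ioctl(), built on the same
 * coroutine-plus-bottom-half emulation as bdrv_aio_flush() and
 * bdrv_aio_discard().
 */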
BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
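/*
 * Buffer allocation helpers.  These return (or try to return) memory
 * aligned to the optimal alignment advertised by bs, which is what
 * O_DIRECT-style protocol drivers need for their I/O buffers.
 */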
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
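/*
 * I/O plugging batches up submissions for a whole subtree of nodes.
 * bs->io_plugged counts nested plug sections; the driver callback only
 * fires on the transition to and from zero, and only while plugging has
 * not been temporarily disabled through bdrv_io_unplugged_begin().
 */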
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
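/*
 * Temporarily disable plugging for a subtree: queues that are currently
 * plugged are flushed with the driver's unplug callback, and they are
 * plugged again once the matching bdrv_io_unplugged_end() brings the
 * counter back to zero.
 */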
void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}
void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
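/*
 * Drained sections: while bs->quiesce_counter is non-zero, external event
 * sources are disabled and parents are told to stop issuing requests, so
 * the node sees no new I/O until the matching bdrv_drained_end().
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests reach bs here ...
 *     bdrv_drained_end(bs);
 */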
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }
    bdrv_parent_drained_begin(bs);

    bdrv_drain(bs);
}
void bdrv_drained_end(BlockDriverState *bs)
{
    bdrv_parent_drained_end(bs);

    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }
    aio_enable_external(bdrv_get_aio_context(bs));
}