/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-ram-registrar.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "dataplane/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}
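
/*
 * Complete a request: fill in the status byte, push the used element onto
 * the virtqueue and notify the guest, taking the dataplane notification
 * path when ioeventfd processing is active.
 */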
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_blk_data_plane_notify(s->dataplane, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}
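
/*
 * Apply the configured rerror/werror policy to a failed request.  Returns
 * nonzero if the request was completed or re-queued here (the caller must
 * not complete it again); returns zero only for BLOCK_ERROR_ACTION_IGNORE,
 * where the caller completes the request as if it had succeeded.
 */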
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
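
/*
 * Completion callback for reads and writes.  A single callback may finish
 * a whole chain of merged requests, linked through ->mr_next by
 * submit_requests(), so walk the chain and complete each element.
 */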
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;
static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(ioctl_req);
}

#endif
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}
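
/*
 * SCSI passthrough (VIRTIO_BLK_T_SCSI_CMD): translate the virtio segment
 * layout into an SG_IO ioctl on the host device.  out_sg[0] holds the
 * virtio_blk_outhdr and out_sg[1] the CDB, with any write payload in the
 * following segments; the last three input segments are the sense buffer,
 * the virtio_scsi_inhdr and the virtio_blk_inhdr, with any read payload
 * before them.  Returns -EINPROGRESS when the ioctl was issued
 * asynchronously, or a VIRTIO_BLK_S_* status on immediate failure.
 */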
static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}
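
/*
 * Issue the requests mrb->reqs[start..start + num_reqs - 1] as one I/O.
 * When more than one request is submitted, their iovecs are collected
 * into a freshly allocated qiov (niov entries in total) and the requests
 * are chained through ->mr_next so that virtio_blk_rw_complete() can
 * complete them individually.
 */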
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}
static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}
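
/*
 * Flush the batch in mrb: sort the queued requests by sector so adjacent
 * requests become neighbours, then greedily coalesce sequential runs,
 * cutting a run whenever it would exceed the backend's maximum transfer
 * size or iovec count.
 */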
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in below situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            start = i;
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}
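
/*
 * Validate and issue a discard or write-zeroes request.  Returns
 * VIRTIO_BLK_S_OK if the request was submitted asynchronously, or the
 * error status the caller should complete the request with.
 */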
static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * makes sure that "num_sectors << BDRV_SECTOR_BITS" fits in
     * the integer variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}
typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;
/*
 * check_zoned_request: error checking before issuing requests. If all
 * checks passed, return true.
 * append: true if only zone append requests issued.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status) {
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}
, int ret
)
670 ZoneCmdData
*data
= opaque
;
671 VirtIOBlockReq
*req
= data
->req
;
672 VirtIOBlock
*s
= req
->dev
;
673 VirtIODevice
*vdev
= VIRTIO_DEVICE(req
->dev
);
674 struct iovec
*in_iov
= data
->in_iov
;
675 unsigned in_num
= data
->in_num
;
676 int64_t zrp_size
, n
, j
= 0;
677 int64_t nz
= data
->zone_report_data
.nr_zones
;
678 int8_t err_status
= VIRTIO_BLK_S_OK
;
680 trace_virtio_blk_zone_report_complete(vdev
, req
, nz
, ret
);
682 err_status
= VIRTIO_BLK_S_ZONE_INVALID_CMD
;
686 struct virtio_blk_zone_report zrp_hdr
= (struct virtio_blk_zone_report
) {
687 .nr_zones
= cpu_to_le64(nz
),
689 zrp_size
= sizeof(struct virtio_blk_zone_report
)
690 + sizeof(struct virtio_blk_zone_descriptor
) * nz
;
691 n
= iov_from_buf(in_iov
, in_num
, 0, &zrp_hdr
, sizeof(zrp_hdr
));
692 if (n
!= sizeof(zrp_hdr
)) {
693 virtio_error(vdev
, "Driver provided input buffer that is too small!");
694 err_status
= VIRTIO_BLK_S_ZONE_INVALID_CMD
;
698 for (size_t i
= sizeof(zrp_hdr
); i
< zrp_size
;
699 i
+= sizeof(struct virtio_blk_zone_descriptor
), ++j
) {
700 struct virtio_blk_zone_descriptor desc
=
701 (struct virtio_blk_zone_descriptor
) {
702 .z_start
= cpu_to_le64(data
->zone_report_data
.zones
[j
].start
703 >> BDRV_SECTOR_BITS
),
704 .z_cap
= cpu_to_le64(data
->zone_report_data
.zones
[j
].cap
705 >> BDRV_SECTOR_BITS
),
706 .z_wp
= cpu_to_le64(data
->zone_report_data
.zones
[j
].wp
707 >> BDRV_SECTOR_BITS
),
710 switch (data
->zone_report_data
.zones
[j
].type
) {
712 desc
.z_type
= VIRTIO_BLK_ZT_CONV
;
715 desc
.z_type
= VIRTIO_BLK_ZT_SWR
;
718 desc
.z_type
= VIRTIO_BLK_ZT_SWP
;
721 g_assert_not_reached();
724 switch (data
->zone_report_data
.zones
[j
].state
) {
726 desc
.z_state
= VIRTIO_BLK_ZS_RDONLY
;
729 desc
.z_state
= VIRTIO_BLK_ZS_OFFLINE
;
732 desc
.z_state
= VIRTIO_BLK_ZS_EMPTY
;
735 desc
.z_state
= VIRTIO_BLK_ZS_CLOSED
;
738 desc
.z_state
= VIRTIO_BLK_ZS_FULL
;
741 desc
.z_state
= VIRTIO_BLK_ZS_EOPEN
;
744 desc
.z_state
= VIRTIO_BLK_ZS_IOPEN
;
747 desc
.z_state
= VIRTIO_BLK_ZS_NOT_WP
;
750 g_assert_not_reached();
753 /* TODO: it takes O(n^2) time complexity. Optimizations required. */
754 n
= iov_from_buf(in_iov
, in_num
, i
, &desc
, sizeof(desc
));
755 if (n
!= sizeof(desc
)) {
756 virtio_error(vdev
, "Driver provided input buffer "
757 "for descriptors that is too small!");
758 err_status
= VIRTIO_BLK_S_ZONE_INVALID_CMD
;
763 aio_context_acquire(blk_get_aio_context(s
->conf
.conf
.blk
));
764 virtio_blk_req_complete(req
, err_status
);
765 virtio_blk_free_request(req
);
766 aio_context_release(blk_get_aio_context(s
->conf
.conf
.blk
));
767 g_free(data
->zone_report_data
.zones
);
static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        return;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}
static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;
    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
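
/*
 * Zone management (open/close/finish/reset): a reset-all request covers
 * the whole drive, any other request covers the single zone containing
 * the start sector from the request header.
 */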
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* The zoned device allows the last smaller zone. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}
static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer less than size of "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(data);
}
static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num) {
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    return err_status;
}
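
/*
 * Parse and dispatch one request popped from a virtqueue.  Reads and
 * writes are batched in mrb for possible merging; all other request types
 * are handled (or completed with an error status) immediately.  Returns a
 * negative value only for fatal protocol errors that put the device into
 * the broken state.
 */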
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated we ignored it in the past. So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * We pass out_iov/out_num and in_iov/in_num here because it is
         * not safe to access req->elem.out_sg directly: the header
         * discards above may have modified it.
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set. The switch statement above masked
     * this flag off, so we mask it here in the case labels as well and
     * then check below that it is actually set in the request.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    aio_context_acquire(blk_get_aio_context(s->blk));
    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
    aio_context_release(blk_get_aio_context(s->blk));
}
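
/*
 * Virtqueue kick handler.  Starts dataplane on the first kick if the guest
 * kicked before setting DRIVER_OK, otherwise processes the queue in the
 * current context.
 */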
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (s->dataplane && !s->dataplane_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }
    virtio_blk_handle_vq(s, vq);
}
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;

    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    s->rq = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);

    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    /* Paired with dec in virtio_blk_dma_restart_bh() */
    blk_inc_in_flight(s->conf.conf.blk);

    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
                            virtio_blk_dma_restart_bh, s);
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;
    VirtIOBlockReq *req;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself can
     * produce them. */
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;
        virtqueue_detach_element(req->vq, &req->elem, 0);
        virtio_blk_free_request(req);
    }

    aio_context_release(ctx);

    assert(!s->dataplane_started);
    blk_set_enable_write_cache(s->blk, s->original_wce);
}
/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;
    AioContext *ctx;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    aio_context_release(ctx);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    aio_context_release(blk_get_aio_context(s->blk));
}
static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* Firstly sync all virtio-blk possible supported features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->dataplane_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        aio_context_acquire(blk_get_aio_context(s->blk));
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
        aio_context_release(blk_get_aio_context(s->blk));
    }
}
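
/*
 * Save the list of requests that were in flight when the VM stopped
 * (s->rq, re-queued by the error-stop policy); they are re-parsed and
 * resubmitted by virtio_blk_dma_restart_bh() after loading.
 */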
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req = s->rq;

    while (req) {
        qemu_put_sbyte(f, 1);

        if (s->conf.num_queues > 1) {
            qemu_put_be32(f, virtio_get_queue_index(req->vq));
        }

        qemu_put_virtqueue_element(vdev, f, &req->elem);
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}
static void virtio_resize_cb(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(vdev);
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the global mutex,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}
/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);

    if (!s->dataplane || !s->dataplane_started) {
        return;
    }

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, ctx);
    }
}
/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);

    if (!s->dataplane || !s->dataplane_started) {
        return;
    }

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, ctx);
    }
}
= {
1545 .resize_cb
= virtio_blk_resize
,
1546 .drained_begin
= virtio_blk_drained_begin
,
1547 .drained_end
= virtio_blk_drained_end
,
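
/*
 * Realize: validate configuration (queue count and size, discard and
 * write-zeroes limits), detect zoned backends, create the virtqueues and
 * optionally the dataplane, and hook the device into the block backend.
 */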
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
        conf->num_queues = 1;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (conf->queue_size <= 2) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be > 2", conf->queue_size);
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       !blk_supports_write_perm(conf->conf.blk),
                                       true, errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    if (!blkconf_blocksizes(&conf->conf, errp)) {
        return;
    }

    BlockDriverState *bs = blk_bs(conf->conf.blk);
    if (bs->bl.zoned != BLK_Z_NONE) {
        virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
        if (bs->bl.zoned == BLK_Z_HM) {
            virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
        }
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                            s->host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
    virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        for (i = 0; i < conf->num_queues; i++) {
            virtio_del_queue(vdev, i);
        }
        virtio_cleanup(vdev);
        return;
    }

    /*
     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
     * called after ->start_ioeventfd() has already set blk's AioContext.
     */
    s->change =
        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);

    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);

    blk_iostatus_enable(s->blk);

    add_boot_device_lchs(dev, "/disk@0,0",
                         conf->conf.lcyls,
                         conf->conf.lheads,
                         conf->conf.lsecs);
}
static void virtio_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    unsigned i;

    blk_drain(s->blk);
    del_boot_device_lchs(dev, "/disk@0,0");
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
    for (i = 0; i < conf->num_queues; i++) {
        virtio_del_queue(vdev, i);
    }
    qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
    blk_ram_registrar_destroy(&s->blk_ram_registrar);
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}
static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj));
}
static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
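
/*
 * qdev properties.  Illustrative usage (property names as defined below):
 *
 *   -device virtio-blk-pci,drive=drive0,num-queues=4,queue-size=256,
 *           discard=on,write-zeroes=on,serial=SN12345
 */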
static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_CONFIG_WCE, true),
#ifdef __linux__
    DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_SCSI, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
                       VIRTIO_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
    DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
    DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                     IOThread *),
    DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_DISCARD, true),
    DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
                     conf.report_discard_granularity, true),
    DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_WRITE_ZEROES, true),
    DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
                       conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
                       conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
                     conf.x_enable_wce_if_config_wce, true),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_blk_properties);
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_data_plane_start;
    vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)