/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

14 #include "qemu/osdep.h"
15 #include "qemu/defer-call.h"
16 #include "qapi/error.h"
18 #include "qemu/module.h"
19 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
21 #include "block/block_int.h"
23 #include "hw/block/block.h"
24 #include "hw/qdev-properties.h"
25 #include "sysemu/blockdev.h"
26 #include "sysemu/block-ram-registrar.h"
27 #include "sysemu/sysemu.h"
28 #include "sysemu/runstate.h"
29 #include "hw/virtio/virtio-blk.h"
30 #include "scsi/constants.h"
34 #include "hw/virtio/virtio-bus.h"
35 #include "migration/qemu-file-types.h"
36 #include "hw/virtio/virtio-access.h"
37 #include "hw/virtio/virtio-blk-common.h"
38 #include "qemu/coroutine.h"
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}

static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

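/*
 * Map a host I/O error onto the configured rerror/werror policy: REPORT
 * completes the request with VIRTIO_BLK_S_IOERR, STOP parks it on s->rq so
 * it can be retried after the VM resumes, and IGNORE lets the caller
 * complete the request as if the I/O had succeeded. Returns nonzero when
 * the caller must not complete the request itself.
 */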
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

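/*
 * Completion callback for reads and writes. Merged requests are chained
 * through req->mr_next by submit_requests(), so one backend completion may
 * finish several guest requests.
 */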
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure. If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration. While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);
}

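/*
 * Legacy SCSI command passthrough (VIRTIO_BLK_F_SCSI). On Linux hosts the
 * guest's CDB is forwarded to the backend via the SG_IO ioctl; on other
 * hosts the feature is never offered and such requests complete with
 * VIRTIO_BLK_S_UNSUPP.
 */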
#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred. However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}
#endif

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirection commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}

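/*
 * Submit mrb->reqs[start..start + num_reqs - 1] as one backend request.
 * When several guest requests are merged, their iovecs are copied into a
 * single QEMUIOVector (niov entries) and the requests are chained through
 * mr_next so virtio_blk_rw_complete() can complete each of them.
 */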
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

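/*
 * Sort the pending requests by sector and scan them once, coalescing runs
 * of sequential requests into single submissions, subject to the backend's
 * maximum transfer size and iovec limits.
 */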
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in below situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            start = i;
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

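/*
 * Reject requests that exceed BDRV_REQUEST_MAX_SECTORS, are not aligned to
 * the logical block size, or fall outside the device's capacity.
 */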
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

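/*
 * Common handler for VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES.
 * Returns VIRTIO_BLK_S_OK if the request was queued, or the error status
 * the caller should complete the request with.
 */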
static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * ensures that "num_sectors << BDRV_SECTOR_BITS" fits in the integer
     * variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

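/*
 * Per-command state for asynchronous zoned block commands; the union holds
 * either the zone report buffers or the zone append offset, depending on
 * the command in flight.
 */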
typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;

/*
 * check_zoned_request: error checking before issuing requests. If all checks
 * passed, return true.
 * append: true if only zone append requests issued.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status) {
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}

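/*
 * Completion callback for blk_aio_zone_report(): convert the
 * BlockZoneDescriptor array into virtio_blk_zone_descriptor entries and
 * copy them, after the virtio_blk_zone_report header, into the guest's
 * in_iov buffer.
 */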
static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };
    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
         i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                    >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                    >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                    >> BDRV_SECTOR_BITS),
        };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: it takes O(n^2) time complexity. Optimizations required. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            goto out;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}

static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;
    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* The zoned device allows the last smaller zone. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer less than size of "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data);
}

static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         unsigned out_num,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated we ignored it in the past. So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * Pass out_iov/out_num and in_iov/in_num because it is not safe to
         * access req->elem.out_sg directly: it may have been modified by
         * virtio_blk_handle_request().
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement,
     * so we must mask it for these requests, then we will check if it is set.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}

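/*
 * Drain a virtqueue: pop and dispatch requests with guest notifications
 * suppressed while we are busy, then flush any still-pending multireq
 * batch in one go.
 */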
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (!s->dataplane_disabled && !s->dataplane_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }

    virtio_blk_handle_vq(s, vq);
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;

    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        req = s->rq;
        s->rq = NULL;
    }

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
                next = req ? req->next : NULL;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);
}

static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    /* Paired with dec in virtio_blk_dma_restart_bh() */
    blk_inc_in_flight(s->conf.conf.blk);

    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
                            virtio_blk_dma_restart_bh, s);
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;

    /* Dataplane has stopped... */
    assert(!s->dataplane_started);

    /* ...but requests may still be in flight. */
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself can
     * produce them. */
    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        while (s->rq) {
            req = s->rq;
            s->rq = req->next;

            /* No other threads can access req->vq here */
            virtqueue_detach_element(req->vq, &req->elem, 0);

            virtio_blk_free_request(req);
        }
    }

    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adopt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* Firstly sync all virtio-blk possible supported features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->dataplane_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes. Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
    }
}

*vdev
, QEMUFile
*f
)
1414 VirtIOBlock
*s
= VIRTIO_BLK(vdev
);
1416 WITH_QEMU_LOCK_GUARD(&s
->rq_lock
) {
1417 VirtIOBlockReq
*req
= s
->rq
;
1420 qemu_put_sbyte(f
, 1);
1422 if (s
->conf
.num_queues
> 1) {
1423 qemu_put_be32(f
, virtio_get_queue_index(req
->vq
));
1426 qemu_put_virtqueue_element(vdev
, f
, &req
->elem
);
1431 qemu_put_sbyte(f
, 0);
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    }

    return 0;
}

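/*
 * Validate an iothread-vq-mapping list: every IOThread must exist and be
 * named only once, and either all entries or none carry an explicit vqs
 * list. Explicit vq indexes must be below num_queues, assigned exactly
 * once, and together cover every queue.
 */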
static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
                                  uint16_t num_queues, Error **errp)
{
    g_autofree unsigned long *vqs = bitmap_new(num_queues);
    g_autoptr(GHashTable) iothreads =
        g_hash_table_new(g_str_hash, g_str_equal);

    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
        const char *name = node->value->iothread;
        uint16List *vq;

        if (!iothread_by_id(name)) {
            error_setg(errp, "IOThread \"%s\" object does not exist", name);
            return false;
        }

        if (!g_hash_table_add(iothreads, (gpointer)name)) {
            error_setg(errp,
                       "duplicate IOThread name \"%s\" in iothread-vq-mapping",
                       name);
            return false;
        }

        if (node != list) {
            if (!!node->value->vqs != !!list->value->vqs) {
                error_setg(errp, "either all items in iothread-vq-mapping "
                                 "must have vqs or none of them must have it");
                return false;
            }
        }

        for (vq = node->value->vqs; vq; vq = vq->next) {
            if (vq->value >= num_queues) {
                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
                           "less than num_queues %u in iothread-vq-mapping",
                           vq->value, name, num_queues);
                return false;
            }

            if (test_and_set_bit(vq->value, vqs)) {
                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
                           "because it is already assigned", vq->value, name);
                return false;
            }
        }
    }

    if (list->value->vqs) {
        for (uint16_t i = 0; i < num_queues; i++) {
            if (!test_bit(i, vqs)) {
                error_setg(errp,
                        "missing vq %u IOThread assignment in iothread-vq-mapping",
                        i);
                return false;
            }
        }
    }

    return true;
}

)
1530 VirtIODevice
*vdev
= opaque
;
1532 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1533 virtio_notify_config(vdev
);
static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the BQL,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}

static void virtio_blk_data_plane_detach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

static void virtio_blk_data_plane_attach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->dataplane_started) {
        virtio_blk_data_plane_detach(s);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->dataplane_started) {
        virtio_blk_data_plane_attach(s);
    }
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb     = virtio_blk_resize,
    .drained_begin = virtio_blk_drained_begin,
    .drained_end   = virtio_blk_drained_end,
};

/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
static void
apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
                 AioContext **vq_aio_context, uint16_t num_queues)
{
    IOThreadVirtQueueMappingList *node;
    size_t num_iothreads = 0;
    size_t cur_iothread = 0;

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        num_iothreads++;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        IOThread *iothread = iothread_by_id(node->value->iothread);
        AioContext *ctx = iothread_get_aio_context(iothread);

        /* Released in virtio_blk_data_plane_destroy() */
        object_ref(OBJECT(iothread));

        if (node->value->vqs) {
            uint16List *vq;

            /* Explicit vq:IOThread assignment */
            for (vq = node->value->vqs; vq; vq = vq->next) {
                vq_aio_context[vq->value] = ctx;
            }
        } else {
            /* Round-robin vq:IOThread assignment */
            for (unsigned i = cur_iothread; i < num_queues;
                 i += num_iothreads) {
                vq_aio_context[i] = ctx;
            }
        }

        cur_iothread++;
    }
}

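/*
 * AioContext selection policy: an explicit iothread-vq-mapping wins,
 * otherwise a single "iothread" property pins every queue to that IOThread,
 * and without either all queues are handled in the main loop context.
 */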
/* Context: BQL held */
static bool virtio_blk_data_plane_create(VirtIOBlock *s, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlkConf *conf = &s->conf;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (conf->iothread || conf->iothread_vq_mapping_list) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /*
         * If dataplane is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk dataplane: ");
            return false;
        }
    }
    /* Don't try if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        s->dataplane_disabled = true;
        return false;
    }

    s->vq_aio_context = g_new(AioContext *, conf->num_queues);

    if (conf->iothread_vq_mapping_list) {
        apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
                         conf->num_queues);
    } else if (conf->iothread) {
        AioContext *ctx = iothread_get_aio_context(conf->iothread);
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }

        /* Released in virtio_blk_data_plane_destroy() */
        object_ref(OBJECT(conf->iothread));
    } else {
        AioContext *ctx = qemu_get_aio_context();
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }
    }

    return true;
}

/* Context: BQL held */
static void virtio_blk_data_plane_destroy(VirtIOBlock *s)
{
    VirtIOBlkConf *conf = &s->conf;

    assert(!s->dataplane_started);

    if (conf->iothread_vq_mapping_list) {
        IOThreadVirtQueueMappingList *node;

        for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
            IOThread *iothread = iothread_by_id(node->value->iothread);
            object_unref(OBJECT(iothread));
        }
    }

    if (conf->iothread) {
        object_unref(OBJECT(conf->iothread));
    }

    g_free(s->vq_aio_context);
    s->vq_aio_context = NULL;
}

/* Context: BQL held */
static int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;
    Error *local_err = NULL;
    int r;

    if (s->dataplane_started || s->dataplane_starting) {
        return 0;
    }

    s->dataplane_starting = true;

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

            /*
             * The transaction expects the ioeventfds to be open when it
             * commits. Do it now, before the cleanup loop.
             */
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                            &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail_aio_context;
    }

    /*
     * These fields must be visible to the IOThread when it processes the
     * virtqueue, otherwise it will think dataplane has not started yet.
     *
     * Make sure ->dataplane_started is false when blk_set_aio_context() is
     * called above so that draining does not cause the host notifier to be
     * detached/attached prematurely.
     */
    s->dataplane_starting = false;
    s->dataplane_started = true;
    smp_wmb(); /* paired with aio_notify_accept() on the read side */

    /* Get this show started by hooking up our callbacks */
    if (!blk_in_drain(s->conf.conf.blk)) {
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            /* Kick right away to begin processing requests already in vring */
            event_notifier_set(virtio_queue_get_host_notifier(vq));

            virtio_queue_aio_attach_host_notifier(vq, ctx);
        }
    }
    return 0;

fail_aio_context:
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }
fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
fail_guest_notifiers:
    s->dataplane_disabled = true;
    s->dataplane_starting = false;
    return -ENOSYS;
}

/*
 * Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);

    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());

    /*
     * Test and clear notifier after disabling event, in case poll callback
     * didn't have time to run.
     */
    virtio_queue_host_notifier_read(host_notifier);
}

/* Context: BQL held */
static void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = qdev_get_parent_bus(DEVICE(s));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;

    if (!s->dataplane_started || s->dataplane_stopping) {
        return;
    }

    /* Better luck next time. */
    if (s->dataplane_disabled) {
        s->dataplane_disabled = false;
        s->dataplane_started = false;
        return;
    }
    s->dataplane_stopping = true;

    if (!blk_in_drain(s->conf.conf.blk)) {
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            aio_wait_bh_oneshot(ctx, virtio_blk_data_plane_stop_vq_bh, vq);
        }
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /*
     * Set ->dataplane_started to false before draining so that host notifiers
     * are not detached/attached anymore.
     */
    s->dataplane_started = false;

    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
    blk_drain(s->conf.conf.blk);

    /*
     * Try to switch bs back to the QEMU main loop. If other users keep the
     * BlockBackend in the iothread, that's ok.
     */
    blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    s->dataplane_stopping = false;
}

static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
        conf->num_queues = 1;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (conf->queue_size <= 2) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be > 2", conf->queue_size);
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       !blk_supports_write_perm(conf->conf.blk),
                                       true, errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    if (!blkconf_blocksizes(&conf->conf, errp)) {
        return;
    }

    BlockDriverState *bs = blk_bs(conf->conf.blk);
    if (bs->bl.zoned != BLK_Z_NONE) {
        virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
        if (bs->bl.zoned == BLK_Z_HM) {
            virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
        }
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (conf->iothread_vq_mapping_list) {
        if (conf->iothread) {
            error_setg(errp, "iothread and iothread-vq-mapping properties "
                             "cannot be set at the same time");
            return;
        }

        if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
                                               conf->num_queues, errp)) {
            return;
        }
    }

    s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                            s->host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

    qemu_mutex_init(&s->rq_lock);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
    virtio_blk_data_plane_create(s, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        for (i = 0; i < conf->num_queues; i++) {
            virtio_del_queue(vdev, i);
        }
        virtio_cleanup(vdev);
        return;
    }

    /*
     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
     * called after ->start_ioeventfd() has already set blk's AioContext.
     */
    s->change =
        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);

    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);

    blk_iostatus_enable(s->blk);

    add_boot_device_lchs(dev, "/disk@0,0",
                         conf->conf.lcyls,
                         conf->conf.lheads,
                         conf->conf.lsecs);
}

static void virtio_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    unsigned i;

    blk_drain(s->blk);
    del_boot_device_lchs(dev, "/disk@0,0");
    virtio_blk_data_plane_destroy(s);
    for (i = 0; i < conf->num_queues; i++) {
        virtio_del_queue(vdev, i);
    }
    qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
    qemu_mutex_destroy(&s->rq_lock);
    blk_ram_registrar_destroy(&s->blk_ram_registrar);
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj));
}

= {
2077 .name
= "virtio-blk",
2078 .minimum_version_id
= 2,
2080 .fields
= (const VMStateField
[]) {
2081 VMSTATE_VIRTIO_DEVICE
,
2082 VMSTATE_END_OF_LIST()
2086 static Property virtio_blk_properties
[] = {
2087 DEFINE_BLOCK_PROPERTIES(VirtIOBlock
, conf
.conf
),
2088 DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock
, conf
.conf
),
2089 DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock
, conf
.conf
),
2090 DEFINE_PROP_STRING("serial", VirtIOBlock
, conf
.serial
),
2091 DEFINE_PROP_BIT64("config-wce", VirtIOBlock
, host_features
,
2092 VIRTIO_BLK_F_CONFIG_WCE
, true),
2094 DEFINE_PROP_BIT64("scsi", VirtIOBlock
, host_features
,
2095 VIRTIO_BLK_F_SCSI
, false),
2097 DEFINE_PROP_BIT("request-merging", VirtIOBlock
, conf
.request_merging
, 0,
2099 DEFINE_PROP_UINT16("num-queues", VirtIOBlock
, conf
.num_queues
,
2100 VIRTIO_BLK_AUTO_NUM_QUEUES
),
2101 DEFINE_PROP_UINT16("queue-size", VirtIOBlock
, conf
.queue_size
, 256),
2102 DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock
, conf
.seg_max_adjust
, true),
2103 DEFINE_PROP_LINK("iothread", VirtIOBlock
, conf
.iothread
, TYPE_IOTHREAD
,
2105 DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock
,
2106 conf
.iothread_vq_mapping_list
),
2107 DEFINE_PROP_BIT64("discard", VirtIOBlock
, host_features
,
2108 VIRTIO_BLK_F_DISCARD
, true),
2109 DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock
,
2110 conf
.report_discard_granularity
, true),
2111 DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock
, host_features
,
2112 VIRTIO_BLK_F_WRITE_ZEROES
, true),
2113 DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock
,
2114 conf
.max_discard_sectors
, BDRV_REQUEST_MAX_SECTORS
),
2115 DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock
,
2116 conf
.max_write_zeroes_sectors
, BDRV_REQUEST_MAX_SECTORS
),
2117 DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock
,
2118 conf
.x_enable_wce_if_config_wce
, true),
2119 DEFINE_PROP_END_OF_LIST(),
2122 static void virtio_blk_class_init(ObjectClass
*klass
, void *data
)
2124 DeviceClass
*dc
= DEVICE_CLASS(klass
);
2125 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_CLASS(klass
);
2127 device_class_set_props(dc
, virtio_blk_properties
);
2128 dc
->vmsd
= &vmstate_virtio_blk
;
2129 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
2130 vdc
->realize
= virtio_blk_device_realize
;
2131 vdc
->unrealize
= virtio_blk_device_unrealize
;
2132 vdc
->get_config
= virtio_blk_update_config
;
2133 vdc
->set_config
= virtio_blk_set_config
;
2134 vdc
->get_features
= virtio_blk_get_features
;
2135 vdc
->set_status
= virtio_blk_set_status
;
2136 vdc
->reset
= virtio_blk_reset
;
2137 vdc
->save
= virtio_blk_save_device
;
2138 vdc
->load
= virtio_blk_load_device
;
2139 vdc
->start_ioeventfd
= virtio_blk_data_plane_start
;
2140 vdc
->stop_ioeventfd
= virtio_blk_data_plane_stop
;
2143 static const TypeInfo virtio_blk_info
= {
2144 .name
= TYPE_VIRTIO_BLK
,
2145 .parent
= TYPE_VIRTIO_DEVICE
,
2146 .instance_size
= sizeof(VirtIOBlock
),
2147 .instance_init
= virtio_blk_instance_init
,
2148 .class_init
= virtio_blk_class_init
,
2151 static void virtio_register_types(void)
2153 type_register_static(&virtio_blk_info
);
2156 type_init(virtio_register_types
)