/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#include "dataplane/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

/* Config size before the discard support (hide associated config fields) */
#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \
                                     max_discard_sectors)
/*
 * Starting from the discard feature, we can use this array to properly
 * set the config size depending on the features enabled.
 */
static VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
     .end = virtio_endof(struct virtio_blk_config, discard_sector_alignment)},
    {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
     .end = virtio_endof(struct virtio_blk_config, write_zeroes_may_unmap)},
    {}
};

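/*
 * The config space visible to the guest grows with the enabled host
 * features: for example, with VIRTIO_BLK_F_DISCARD the config size
 * extends through discard_sector_alignment (see feature_sizes above).
 */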
static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features)
{
    s->config_size = MAX(VIRTIO_BLK_CFG_SIZE,
        virtio_feature_get_config_size(feature_sizes, host_features));

    assert(s->config_size <= sizeof(struct virtio_blk_config));
}

static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}

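/*
 * Write the status byte into the request's in-header, push the element
 * onto the used ring, and notify the guest, using the dataplane
 * (ioeventfd) notification path when it is active.
 */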
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_blk_data_plane_notify(s->dataplane, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

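/*
 * Returns nonzero if the caller must not complete the request itself:
 * the request was either queued on s->rq to be retried once the VM
 * resumes (BLOCK_ERROR_ACTION_STOP) or already completed with
 * VIRTIO_BLK_S_IOERR (BLOCK_ERROR_ACTION_REPORT). Returns 0 only for
 * BLOCK_ERROR_ACTION_IGNORE, where the caller completes it as usual.
 */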
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

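/*
 * Completion callback for reads and writes. A single AIO completion may
 * cover several merged requests: submit_requests() chains them through
 * mr_next, so walk the whole chain here.
 */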
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(ioctl_req);
}

#endif

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

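/*
 * Returns -EINPROGRESS if the SG_IO ioctl was submitted asynchronously
 * (completion happens in virtio_blk_ioctl_complete), or a VIRTIO_BLK_S_*
 * status for immediate completion by the caller.
 */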
static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    VirtQueueElement *elem = &req->elem;
    VirtIOBlock *blk = req->dev;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}

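/*
 * Submit mrb->reqs[start .. start + num_reqs - 1] as a single I/O. When
 * more than one request is merged, their iovecs are concatenated into a
 * freshly allocated qiov (niov entries in total) and the requests are
 * chained through mr_next so that virtio_blk_rw_complete() can complete
 * each of them from the single AIO callback.
 */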
static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here. We need to initialize it locally and then
         * add the external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                        virtio_blk_rw_complete, mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                       virtio_blk_rw_complete, mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

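/*
 * Sort the buffered requests by start sector, then walk them and flush a
 * batch whenever the next request is not contiguous or merging it would
 * exceed the backend's iovec or transfer-size limits. For example,
 * requests covering sectors 0-7, 8-15 and 32-39 go out as two I/Os:
 * [0, 16) and [32, 40).
 */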
static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(blk, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(blk, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(blk, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(req->dev->blk, mrb);
    }
    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}

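/*
 * Reject requests whose size exceeds the block layer limit, whose offset
 * or length is not aligned to the logical block size, or which reach
 * past the end of the backend.
 */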
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * makes sure that "num_sectors << BDRV_SECTOR_BITS" fits in the
     * integer variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

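/*
 * Parse and dispatch one request. The virtqueue element carries a
 * virtio_blk_outhdr in the first output segment and a virtio_blk_inhdr
 * (the status byte) at the end of the last input segment; any data
 * payload sits in between. Returns -1 only when the device is broken
 * (virtio_error() has been called), 0 otherwise.
 */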
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front(&out_iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag when
     * it has not been negotiated, we have ignored it in the past, so keep
     * ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s->blk, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set. The switch expression masks that flag,
     * so mask it in these case labels too; whether the flag was actually
     * set is checked explicitly below.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}

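/*
 * Drain the virtqueue, batching sequential requests in mrb. Guest
 * notifications are disabled while the queue is processed and re-enabled
 * before the final emptiness check, so kicks that race with processing
 * are not lost. Returns true if any request was processed.
 */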
bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool progress = false;

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_io_plug(s->blk);

    do {
        virtio_queue_set_notification(vq, 0);

        while ((req = virtio_blk_get_request(s, vq))) {
            progress = true;
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        virtio_queue_set_notification(vq, 1);
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }

    blk_io_unplug(s->blk);
    aio_context_release(blk_get_aio_context(s->blk));
    return progress;
}

static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
{
    virtio_blk_handle_vq(s, vq);
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (s->dataplane) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }
    virtio_blk_handle_output_do(s, vq);
}

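/*
 * Bottom half that resubmits the requests parked on s->rq (those stopped
 * by BLOCK_ERROR_ACTION_STOP or loaded from the migration stream) once
 * the VM is running again.
 */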
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }
    blk_dec_in_flight(s->conf.conf.blk);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        /* FIXME The data plane is not started yet, so these requests are
         * processed in the main thread. */
        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
                           virtio_blk_dma_restart_bh, s);
        blk_inc_in_flight(s->conf.conf.blk);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;
    VirtIOBlockReq *req;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself
     * can produce them. */
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;
        virtqueue_detach_element(req->vq, &req->elem, 0);
        virtio_blk_free_request(req);
    }

    aio_context_release(ctx);

    assert(!s->dataplane_started);
    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* Coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max, 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stw_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     blk_size >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    memcpy(config, &blkcfg, s->config_size);
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    aio_context_release(blk_get_aio_context(s->blk));
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* First, sync all possibly supported virtio-blk features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (blk_is_read_only(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->dataplane_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        aio_context_acquire(blk_get_aio_context(s->blk));
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
        aio_context_release(blk_get_aio_context(s->blk));
    }
}

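/*
 * The in-flight request list is framed in the migration stream as a
 * sequence of records, each introduced by a sbyte of 1 and terminated by
 * a final sbyte of 0; with multiqueue, the virtqueue index precedes each
 * element.
 */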
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req = s->rq;

    while (req) {
        qemu_put_sbyte(f, 1);

        if (s->conf.num_queues > 1) {
            qemu_put_be32(f, virtio_get_queue_index(req->vq));
        }

        qemu_put_virtqueue_element(f, &req->elem);
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};

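/*
 * Validate the user-supplied configuration, size the config space for
 * the enabled features, create the virtqueues and, if configured, the
 * dataplane, before the device goes live.
 */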
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       blk_is_read_only(conf->conf.blk), true,
                                       errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    blkconf_blocksizes(&conf->conf);

    if (conf->conf.logical_block_size >
        conf->conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    virtio_blk_set_config_size(s, s->host_features);

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);
    blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size);

    blk_iostatus_enable(s->blk);
}

static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);

    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj), NULL);
}

static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_CONFIG_WCE, true),
#ifdef __linux__
    DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_SCSI, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
    DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                     IOThread *),
    DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_DISCARD, true),
    DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_WRITE_ZEROES, true),
    DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
                       conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
                       conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_blk_properties;
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_data_plane_start;
    vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}

static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)