/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block-common.h"
#include "sysemu/blockdev.h"
#include "hw/virtio-blk.h"
#include "hw/scsi-defs.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio-bus.h"

/*
 * Moving to QOM later in this series.
 */
static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}

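/*
 * Per-request state: the popped virtqueue element, pointers into the
 * guest-supplied header/footer segments, and the I/O vector covering the
 * data segments.  Requests that must be retried after an error are
 * chained through 'next' on VirtIOBlock->rq.
 */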
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    struct virtio_scsi_inhdr *scsi;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
    BlockAcctCookie acct;
} VirtIOBlockReq;

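/*
 * Complete a request: store the status byte in the guest-visible inhdr,
 * push the element onto the used ring and notify the guest.
 */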
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);
}

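/*
 * Apply the configured rerror/werror policy.  Returns nonzero if the
 * request was either queued for retry (BDRV_ACTION_STOP) or completed
 * with an error (BDRV_ACTION_REPORT), in which case the caller must not
 * touch it again; returns zero for BDRV_ACTION_IGNORE.
 */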
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read)
{
    BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read,
                                                    error);
    VirtIOBlock *s = req->dev;

    if (action == BDRV_ACTION_STOP) {
        req->next = s->rq;
        s->rq = req;
    } else if (action == BDRV_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        bdrv_acct_done(s->bs, &req->acct);
        g_free(req);
    }

    bdrv_error_action(s->bs, action, is_read, error);
    return action != BDRV_ACTION_IGNORE;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    trace_virtio_blk_rw_complete(req, ret);

    if (ret) {
        bool is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT);
        if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
    g_free(req);
}

92
93 static void virtio_blk_flush_complete(void *opaque, int ret)
94 {
95 VirtIOBlockReq *req = opaque;
96
97 if (ret) {
98 if (virtio_blk_handle_rw_error(req, -ret, 0)) {
99 return;
100 }
101 }
102
103 virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
104 bdrv_acct_done(req->dev->bs, &req->acct);
105 g_free(req);
106 }
107
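/*
 * Request allocation.  virtio_blk_get_request() additionally pops the
 * next available element off the virtqueue and returns NULL once the
 * queue is empty.
 */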
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = g_malloc(sizeof(*req));
    req->dev = s;
    req->qiov.size = 0;
    req->next = NULL;
    return req;
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            g_free(req);
            return NULL;
        }
    }

    return req;
}

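/*
 * SCSI passthrough (VIRTIO_BLK_T_SCSI_CMD): translate the request's
 * segment layout into an SG_IO ioctl on the host block device.  Only
 * available on Linux; unsupported configurations fail the request with
 * VIRTIO_BLK_S_UNSUPP.
 */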
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
#ifdef __linux__
    int ret;
    int i;
#endif
    int status = VIRTIO_BLK_S_OK;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        g_free(req);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (!req->dev->blk.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    struct sg_io_hdr hdr;
    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is a write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++) {
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;
        }

        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++) {
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;
        }

        hdr.dxferp = req->elem.in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field."  Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
        hdr.status = CHECK_CONDITION;
    }

    stl_p(&req->scsi->errors,
          hdr.status | (hdr.msg_status << 8) |
          (hdr.host_status << 16) | (hdr.driver_status << 24));
    stl_p(&req->scsi->residual, hdr.resid);
    stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
    stl_p(&req->scsi->data_len, hdr.dxfer_len);

    virtio_blk_req_complete(req, status);
    g_free(req);
    return;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    stl_p(&req->scsi->errors, 255);
    virtio_blk_req_complete(req, status);
    g_free(req);
}

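/*
 * Write batching: up to 32 guest writes are collected in a MultiReqBuffer
 * and handed to bdrv_aio_multiwrite() in one go, which lets the block
 * layer merge adjacent requests.
 */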
typedef struct MultiReqBuffer {
    BlockRequest blkreq[32];
    unsigned int num_writes;
} MultiReqBuffer;

static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
{
    int i, ret;

    if (!mrb->num_writes) {
        return;
    }

    ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
    if (ret != 0) {
        for (i = 0; i < mrb->num_writes; i++) {
            if (mrb->blkreq[i].error) {
                virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
            }
        }
    }

    mrb->num_writes = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}

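/*
 * Validate alignment of a guest write and queue it into the multiwrite
 * buffer; the batch is flushed early if all 32 slots are in use.
 */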
static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockRequest *blkreq;
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);

    trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (mrb->num_writes == 32) {
        virtio_submit_multiwrite(req->dev->bs, mrb);
    }

    blkreq = &mrb->blkreq[mrb->num_writes];
    blkreq->sector = sector;
    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq->qiov = &req->qiov;
    blkreq->cb = virtio_blk_rw_complete;
    blkreq->opaque = req;
    blkreq->error = 0;

    mrb->num_writes++;
}

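/*
 * Reads bypass the multiwrite buffer and are submitted directly as
 * asynchronous requests, subject to the same alignment checks.
 */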
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                   req->qiov.size / BDRV_SECTOR_SIZE,
                   virtio_blk_rw_complete, req);
}

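/*
 * Decode one request.  The guest places the virtio_blk_outhdr in the
 * first output segment and expects the virtio_blk_inhdr (status byte) in
 * the last input segment; everything in between is data.  Dispatch on
 * the request type read from the outhdr.
 */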
static void virtio_blk_handle_request(VirtIOBlockReq *req,
                                      MultiReqBuffer *mrb)
{
    uint32_t type;

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        error_report("virtio-blk missing headers");
        exit(1);
    }

    if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
        req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
        error_report("virtio-blk header not in correct element");
        exit(1);
    }

    req->out = (void *)req->elem.out_sg[0].iov_base;
    req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

    type = ldl_p(&req->out->type);

    if (type & VIRTIO_BLK_T_FLUSH) {
        virtio_blk_handle_flush(req, mrb);
    } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
        virtio_blk_handle_scsi(req);
    } else if (type & VIRTIO_BLK_T_GET_ID) {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        strncpy(req->elem.in_sg[0].iov_base,
                s->blk.serial ? s->blk.serial : "",
                MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        g_free(req);
    } else if (type & VIRTIO_BLK_T_OUT) {
        qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                 req->elem.out_num - 1);
        virtio_blk_handle_write(req, mrb);
    } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) {
        /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */
        qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                 req->elem.in_num - 1);
        virtio_blk_handle_read(req);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        g_free(req);
    }
}

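/*
 * Virtqueue notify handler: drain all pending requests from the queue,
 * then submit any batched writes in one multiwrite call.
 */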
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * dataplane here instead of waiting for .set_status().
     */
    if (s->dataplane) {
        virtio_blk_data_plane_start(s->dataplane);
        return;
    }
#endif

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

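/*
 * Requests parked on s->rq by the rerror/werror=stop policy are replayed
 * from a bottom half once the VM resumes.
 */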
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        /* handle_request may free or re-chain req, so fetch next first */
        VirtIOBlockReq *next = req->next;
        virtio_blk_handle_request(req, &mrb);
        req = next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    VirtIOBlock *s = to_virtio_blk(vdev);

    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    bdrv_drain_all();
}

/* Coalesce internal state, copy to pci i/o region 0. */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int blk_size = s->conf->logical_block_size;

    bdrv_get_geometry(s->bs, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, s->conf->cyls);
    stl_raw(&blkcfg.blk_size, blk_size);
    stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
    stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
    blkcfg.heads = s->conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is OK for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
        blkcfg.sectors = s->conf->secs & ~s->sector_mask;
    } else {
        blkcfg.sectors = s->conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = bdrv_enable_write_cache(s->bs);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));
    bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0);
}

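/*
 * Advertise the feature bits this device implements on top of whatever
 * the transport passed in; RO and WCE reflect the current state of the
 * backing device.
 */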
static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIOBlock *s = to_virtio_blk(vdev);

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
    features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
    features |= (1 << VIRTIO_BLK_F_BLK_SIZE);
    features |= (1 << VIRTIO_BLK_F_SCSI);

    if (s->blk.config_wce) {
        features |= (1 << VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (bdrv_enable_write_cache(s->bs)) {
        features |= (1 << VIRTIO_BLK_F_WCE);
    }
    if (bdrv_is_read_only(s->bs)) {
        features |= 1 << VIRTIO_BLK_F_RO;
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    uint32_t features;

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                    VIRTIO_CONFIG_S_DRIVER_OK))) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    features = vdev->guest_features;
    bdrv_set_enable_write_cache(s->bs, !!(features & (1 << VIRTIO_BLK_F_WCE)));
}

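/*
 * Migration: after the generic virtio state, the list of in-flight
 * requests is streamed as (1, VirtQueueElement) records terminated by a
 * 0 byte; on load the elements are re-chained onto s->rq and their
 * scatter-gather lists are remapped into guest memory.
 */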
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;
    int ret;

    if (version_id != 2) {
        return -EINVAL;
    }

    ret = virtio_load(&s->vdev, f);
    if (ret) {
        return ret;
    }

    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;

        virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
                         req->elem.in_num, 1);
        virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
                         req->elem.out_num, 0);
    }

    return 0;
}

static void virtio_blk_resize(void *opaque)
{
    VirtIOBlock *s = opaque;

    virtio_notify_config(&s->vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};

void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk)
{
    VirtIOBlock *s = VIRTIO_BLK(dev);
    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
}

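/*
 * Shared initialization for the legacy virtio-blk-pci path (where *ps is
 * NULL and the VirtIOBlock is allocated here) and the refactored QOM
 * virtio-blk device (where the caller passes an existing instance).
 */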
static VirtIODevice *virtio_blk_common_init(DeviceState *dev,
                                            VirtIOBlkConf *blk,
                                            VirtIOBlock **ps)
{
    VirtIOBlock *s = *ps;
    static int virtio_blk_id;

    if (!blk->conf.bs) {
        error_report("drive property not set");
        return NULL;
    }
    if (!bdrv_is_inserted(blk->conf.bs)) {
        error_report("Device needs media, but drive is empty");
        return NULL;
    }

    blkconf_serial(&blk->conf, &blk->serial);
    if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
        return NULL;
    }

    /*
     * We have two cases here: the old virtio-blk-pci device, and the
     * refactored virtio-blk.
     */
    if (s == NULL) {
        /* virtio-blk-pci */
        s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
                                              sizeof(struct virtio_blk_config),
                                              sizeof(VirtIOBlock));
    } else {
        /* virtio-blk */
        virtio_init(VIRTIO_DEVICE(s), "virtio-blk", VIRTIO_ID_BLOCK,
                    sizeof(struct virtio_blk_config));
    }

    s->vdev.get_config = virtio_blk_update_config;
    s->vdev.set_config = virtio_blk_set_config;
    s->vdev.get_features = virtio_blk_get_features;
    s->vdev.set_status = virtio_blk_set_status;
    s->vdev.reset = virtio_blk_reset;
    s->bs = blk->conf.bs;
    s->conf = &blk->conf;
    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (!virtio_blk_data_plane_create(&s->vdev, blk, &s->dataplane)) {
        virtio_cleanup(&s->vdev);
        return NULL;
    }
#endif

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    s->qdev = dev;
    register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size);

    bdrv_iostatus_enable(s->bs);
    add_boot_device_path(s->conf->bootindex, dev, "/disk@0,0");

    return &s->vdev;
}

VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk)
{
    VirtIOBlock *s = NULL;
    return virtio_blk_common_init(dev, blk, &s);
}

void virtio_blk_exit(VirtIODevice *vdev)
{
    VirtIOBlock *s = to_virtio_blk(vdev);

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
#endif
    qemu_del_vm_change_state_handler(s->change);
    unregister_savevm(s->qdev, "virtio-blk", s);
    blockdev_mark_auto_del(s->bs);
    virtio_cleanup(vdev);
}

static int virtio_blk_device_init(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlkConf *blk = &(s->blk);
    if (virtio_blk_common_init(qdev, blk, &s) == NULL) {
        return -1;
    }
    return 0;
}

static int virtio_blk_device_exit(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
#endif
    qemu_del_vm_change_state_handler(s->change);
    unregister_savevm(s->qdev, "virtio-blk", s);
    blockdev_mark_auto_del(s->bs);
    virtio_common_cleanup(vdev);
    return 0;
}

static Property virtio_blk_properties[] = {
    DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlock, blk),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    dc->exit = virtio_blk_device_exit;
    dc->props = virtio_blk_properties;
    vdc->init = virtio_blk_device_init;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)