]> git.proxmox.com Git - mirror_qemu.git/blob - hw/scsi/virtio-scsi.c
virtio: cleanup VMSTATE_VIRTIO_DEVICE
[mirror_qemu.git] / hw / scsi / virtio-scsi.c
1 /*
2 * Virtio SCSI HBA
3 *
4 * Copyright IBM, Corp. 2010
5 * Copyright Red Hat, Inc. 2011
6 *
7 * Authors:
8 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "standard-headers/linux/virtio_ids.h"
19 #include "hw/virtio/virtio-scsi.h"
20 #include "qemu/error-report.h"
21 #include "qemu/iov.h"
22 #include "sysemu/block-backend.h"
23 #include "hw/scsi/scsi.h"
24 #include "block/scsi.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/virtio/virtio-access.h"
27
/* Decode the LUN number from a virtio-scsi 8-byte LUN field: bytes 2-3
 * hold a big-endian value, masked down to the 14 bits used by the
 * flat addressing format.
 */
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    int raw = (lun[2] << 8) | lun[3];

    return raw & 0x3FFF;
}
32
33 static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
34 {
35 if (lun[0] != 1) {
36 return NULL;
37 }
38 if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
39 return NULL;
40 }
41 return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
42 }
43
/* Initialize a freshly popped VirtIOSCSIReq: set its queue/device back
 * pointers, create the scatter/gather list and response iovec, and zero
 * every field that follows resp_iov in the struct layout.
 *
 * NOTE: the memset relies on resp_iov being the last field that must
 * survive; anything at a higher offset is cleared.  Keep the struct
 * layout in sync with this assumption.
 */
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    /* Offset of the first byte to clear: everything after resp_iov. */
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}
55
/* Release everything virtio_scsi_init_req() set up, then free the request
 * itself (it was allocated by virtqueue_pop()).
 */
void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}
62
/* Copy the response header into the guest buffers, push the element back
 * onto the virtqueue, notify the guest, and free the request.  Must be
 * called exactly once per request; it consumes req.
 */
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    /* With dataplane running (and not fenced) the notification must go
     * through the IOThread-aware path. */
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_scsi_dataplane_notify(vdev, req);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        /* Break the SCSI-layer back pointer before dropping our ref. */
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}
83
/* Handle a malformed guest request: mark the device broken, return the
 * element to the virtqueue without completing it, and free the request.
 */
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
90
91 static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
92 hwaddr *addr, int num, size_t skip)
93 {
94 QEMUSGList *qsgl = &req->qsgl;
95 size_t copied = 0;
96
97 while (num) {
98 if (skip >= iov->iov_len) {
99 skip -= iov->iov_len;
100 } else {
101 qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
102 copied += iov->iov_len - skip;
103 skip = 0;
104 }
105 iov++;
106 addr++;
107 num--;
108 }
109
110 assert(skip == 0);
111 return copied;
112 }
113
/* Parse the virtio element of 'req' into request header, response header
 * and data payload.
 *
 * Copies req_size bytes of request header out of the out_sg and maps
 * resp_size bytes of the in_sg as the response iovec, then builds the
 * data scatter/gather list from whatever follows the headers.
 *
 * Returns 0 on success, -EINVAL if the buffers are too small for the
 * headers, -ENOTSUP if the request has both in and out payloads
 * (bidirectional transfers are not supported).
 */
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    /* Everything past the (possibly padded) headers is data payload. */
    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}
171
172 static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
173 {
174 VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
175 VirtIOSCSIReq *req;
176
177 req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
178 if (!req) {
179 return NULL;
180 }
181 virtio_scsi_init_req(s, vq, req);
182 return req;
183 }
184
/* Migration: save the command-queue index and the virtqueue element of an
 * in-flight SCSI request.  Counterpart of virtio_scsi_load_request().
 */
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    /* Queues 0 and 1 are ctrl and event; command queues start at index 2. */
    uint32_t n = virtio_get_queue_index(req->vq) - 2;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(f, &req->elem);
}
195
/* Migration: recreate an in-flight VirtIOSCSIReq from the stream written
 * by virtio_scsi_save_request().  Exits on malformed migration data, since
 * there is no way to recover a half-parsed request.
 */
static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(f, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    /* Re-derive headers and data payload exactly as the command path does. */
    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
222
/* Per-cancellation notifier used by TMF handling: when a cancelled SCSI
 * request finishes, virtio_scsi_cancel_notify() decrements the owning
 * TMF request's 'remaining' counter.
 */
typedef struct {
    Notifier notifier;       /* registered with scsi_req_cancel_async() */
    VirtIOSCSIReq *tmf_req;  /* TMF request waiting for the cancellation */
} VirtIOSCSICancelNotifier;
227
/* Called when one asynchronously cancelled request has completed.  The
 * TMF request is completed once its last pending cancellation finishes
 * (remaining drops to zero).  The notifier is heap-allocated per
 * cancellation and freed here.
 */
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        virtio_scsi_complete_req(n->tmf_req);
    }
    g_free(n);
}
239
240 static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
241 {
242 if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
243 assert(blk_get_aio_context(d->conf.blk) == s->ctx);
244 }
245 }
246
/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /* subtype is guest-endian in the header; convert before dispatching. */
    virtio_tswap32s(VIRTIO_DEVICE(s), &req->req.tmf.subtype);
    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* Find the in-flight request with the tag named by the TMF. */
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                /* ABORT TASK: cancel asynchronously; the TMF completes in
                 * virtio_scsi_cancel_notify() when the request is gone. */
                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* resetting suppresses VIRTIO_SCSI_S_ABORTED in favour of
         * VIRTIO_SCSI_S_RESET for requests cancelled by the reset. */
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        /* Drop the guard reference taken above; if cancellations are still
         * pending, the last notifier completes the TMF. */
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        /* Reset every device on the target named by byte 1 of the LUN. */
        target = req->req.tmf.lun[1];
        s->resetting++;
        QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
            d = SCSI_DEVICE(kid->child);
            if (d->channel == 0 && d->id == target) {
                qdev_reset_all(&d->qdev);
            }
        }
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    return ret;
}
382
/* Process one control-queue request: peek at the type field, parse the
 * full header for that type, and either run the TMF or answer the
 * asynchronous-notification query/subscribe.  Completes (or frees) the
 * request unless a TMF is still pending (-EINPROGRESS).
 */
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    /* The type field is the first 32 bits of every control request. */
    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                    sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                    sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            /* No async notification classes are supported. */
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        virtio_scsi_complete_req(req);
    } else {
        /* Pending async cancellation; completed by the notifier. */
        assert(r == -EINPROGRESS);
    }
}
422
423 void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
424 {
425 VirtIOSCSIReq *req;
426
427 while ((req = virtio_scsi_pop_req(s, vq))) {
428 virtio_scsi_handle_ctrl_req(s, req);
429 }
430 }
431
432 static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
433 {
434 VirtIOSCSI *s = (VirtIOSCSI *)vdev;
435
436 if (s->ctx) {
437 virtio_scsi_dataplane_start(s);
438 if (!s->dataplane_fenced) {
439 return;
440 }
441 }
442 virtio_scsi_handle_ctrl_vq(s, vq);
443 }
444
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    /* Shrink resp_size to the fixed header so virtio_scsi_complete_req()
     * does not overwrite sense bytes already placed in resp_iov. */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}
453
/* SCSI-layer completion callback: fill in the virtio-scsi command
 * response (status, residual, sense data) and complete the request.
 * Cancelled requests are completed via virtio_scsi_request_cancelled()
 * instead, so they are skipped here.
 */
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
                                         size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        /* Copy sense straight into the guest buffer after the fixed
         * header, truncated to the space the guest provided. */
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}
480
481 static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
482 uint8_t *buf, void *hba_private)
483 {
484 VirtIOSCSIReq *req = hba_private;
485
486 if (cmd->len == 0) {
487 cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
488 memcpy(cmd->buf, buf, cmd->len);
489 }
490
491 /* Extract the direction and mode directly from the request, for
492 * host device passthrough.
493 */
494 cmd->xfer = req->qsgl.size;
495 cmd->mode = req->mode;
496 return 0;
497 }
498
499 static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
500 {
501 VirtIOSCSIReq *req = r->hba_private;
502
503 return &req->qsgl;
504 }
505
506 static void virtio_scsi_request_cancelled(SCSIRequest *r)
507 {
508 VirtIOSCSIReq *req = r->hba_private;
509
510 if (!req) {
511 return;
512 }
513 if (req->dev->resetting) {
514 req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
515 } else {
516 req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
517 }
518 virtio_scsi_complete_cmd_req(req);
519 }
520
/* Complete a command request with a generic failure response. */
static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
526
/* Parse and validate one command request and create its SCSIRequest.
 *
 * Returns 0 when the request is ready to be submitted (with a reference
 * held on req->sreq and the BlockBackend plugged for batching), or a
 * negative errno after completing/freeing the request:
 *   -ENOTSUP  bidirectional transfer (failed back to the guest)
 *   -EINVAL   malformed headers (device marked broken)
 *   -ENOENT   no such target/LUN
 *   -ENOBUFS  guest buffers too small for the transfer
 */
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }

    d = virtio_scsi_device_find(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    /* The CDB's declared transfer must fit in (and match the direction
     * of) the buffers the guest actually provided. */
    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    /* Batch I/O submission; unplugged in virtio_scsi_handle_cmd_req_submit. */
    blk_io_plug(d->conf.blk);
    return 0;
}
567
/* Submit a prepared request to the SCSI layer, release the plug taken in
 * virtio_scsi_handle_cmd_req_prepare(), and drop our extra reference.
 */
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    /* A non-zero enqueue result means data transfer can start now. */
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}
577
/* Drain the command virtqueue: prepare every request first (so the
 * BlockBackend plug can batch the I/O), then submit them all.  On a
 * fatal parse error (-EINVAL) the device is broken, so every request
 * prepared so far is unwound instead of submitted.
 */
void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret;

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    while ((req = virtio_scsi_pop_req(s, vq))) {
        ret = virtio_scsi_handle_cmd_req_prepare(s, req);
        if (!ret) {
            QTAILQ_INSERT_TAIL(&reqs, req, next);
        } else if (ret == -EINVAL) {
            /* The device is broken and shouldn't process any request */
            while (!QTAILQ_EMPTY(&reqs)) {
                req = QTAILQ_FIRST(&reqs);
                QTAILQ_REMOVE(&reqs, req, next);
                /* Undo the plug/ref taken in prepare, return the element. */
                blk_io_unplug(req->sreq->dev->conf.blk);
                scsi_req_unref(req->sreq);
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_scsi_free_req(req);
            }
        }
    }

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}
606
607 static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
608 {
609 /* use non-QOM casts in the data path */
610 VirtIOSCSI *s = (VirtIOSCSI *)vdev;
611
612 if (s->ctx) {
613 virtio_scsi_dataplane_start(s);
614 if (!s->dataplane_fenced) {
615 return;
616 }
617 }
618 virtio_scsi_handle_cmd_vq(s, vq);
619 }
620
/* Fill the guest-visible configuration space (all fields stored in the
 * device's endianness via virtio_st*_p).
 */
static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    /* seg_max: 2 descriptors are reserved for the request/response
     * headers; 128 presumably matches VIRTIO_SCSI_VQ_SIZE — confirm. */
    virtio_stl_p(vdev, &scsiconf->seg_max, 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
638
639 static void virtio_scsi_set_config(VirtIODevice *vdev,
640 const uint8_t *config)
641 {
642 VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
643 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
644
645 if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
646 (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
647 virtio_error(vdev,
648 "bad data written to virtio-scsi configuration space");
649 return;
650 }
651
652 vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
653 vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
654 }
655
656 static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
657 uint64_t requested_features,
658 Error **errp)
659 {
660 VirtIOSCSI *s = VIRTIO_SCSI(vdev);
661
662 /* Firstly sync all virtio-scsi possible supported features */
663 requested_features |= s->host_features;
664 return requested_features;
665 }
666
/* Device reset: stop dataplane, reset every device on the SCSI bus, and
 * restore the guest-writable configuration defaults.
 */
static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (s->ctx) {
        virtio_scsi_dataplane_stop(s);
    }
    /* While resetting, cancelled requests report VIRTIO_SCSI_S_RESET
     * instead of VIRTIO_SCSI_S_ABORTED (see virtio_scsi_request_cancelled). */
    s->resetting++;
    qbus_reset_all(&s->bus.qbus);
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}
683
/* Queue an event on the event virtqueue for the guest.
 *
 * If no buffer is available the event is recorded as dropped and the
 * next successfully pushed event carries VIRTIO_SCSI_T_EVENTS_MISSED.
 * 'dev' may be NULL only for pure EVENTS_MISSED notifications.  When
 * dataplane is running, the event queue is owned by the IOThread, so
 * its AioContext is acquired around the push.
 */
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* No events before the guest driver is up. */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    if (s->dataplane_started) {
        assert(s->ctx);
        aio_context_acquire(s->ctx);
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        goto out;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    /* Events have no request header, only a response payload. */
    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        goto out;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    virtio_scsi_complete_req(req);
out:
    if (s->dataplane_started) {
        aio_context_release(s->ctx);
    }
}
739
740 void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
741 {
742 if (s->events_dropped) {
743 virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
744 }
745 }
746
747 static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
748 {
749 VirtIOSCSI *s = VIRTIO_SCSI(vdev);
750
751 if (s->ctx) {
752 virtio_scsi_dataplane_start(s);
753 if (!s->dataplane_fenced) {
754 return;
755 }
756 }
757 virtio_scsi_handle_event_vq(s, vq);
758 }
759
/* SCSI-layer "media/parameter changed" callback: forward it to the guest
 * as a PARAM_CHANGE event when the feature was negotiated.  ROM devices
 * are excluded.
 */
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        /* Reason encodes ASC in the low byte and ASCQ in the next byte. */
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
    }
}
771
/* Hotplug handler: move the new disk's BlockBackend into the dataplane
 * AioContext (when dataplane is active) and tell the guest to rescan.
 */
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);

    if (s->ctx && !s->dataplane_fenced) {
        /* Some backends (e.g. those with certain op blockers) cannot be
         * used with dataplane; fail the hotplug instead of hanging. */
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        aio_context_acquire(s->ctx);
        blk_set_aio_context(sd->conf.blk, s->ctx);
        aio_context_release(s->ctx);

    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
    }
}
795
/* Hot-unplug handler: notify the guest the device is gone, then perform
 * the generic unplug.
 */
static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);

    /* Push the event while the device still exists on the bus. */
    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
    }

    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}
811
/* Callbacks and limits the generic SCSI bus uses for this HBA. */
static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};
826
/* Shared realize for virtio-scsi and its subclasses (e.g. vhost-scsi):
 * initialize the virtio device, validate num_queues, and create the
 * control, event, and per-queue command virtqueues with the given
 * handlers.  On error, errp is set and the virtio device is cleaned up.
 */
void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    /* Two queues are reserved for control and events, hence the - 2. */
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - 2) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues, VIRTIO_QUEUE_MAX - 2);
        virtio_cleanup(vdev);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
    s->event_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
    }

    if (s->conf.iothread) {
        virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
    }
}
861
/* Realize the virtio-scsi device: common virtio setup, then the SCSI bus
 * with this device as its hotplug handler, and finally any disks given
 * on the command line (cold-plug only).
 */
static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev, &err, virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), dev,
                 &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), dev, &error_abort);

    if (!dev->hotplugged) {
        scsi_bus_legacy_handle_cmdline(&s->bus, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
}
889
/* Instance init: expose the "iothread" link property so an IOThread can
 * be attached before realize for dataplane operation.
 */
static void virtio_scsi_instance_init(Object *obj)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(obj);

    object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
                             (Object **)&vs->conf.iothread,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE, &error_abort);
}
899
/* Undo virtio_scsi_common_realize(): free the command-queue array and
 * tear down the virtio device.
 */
void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}
908
/* Unrealize: nothing beyond the common teardown is needed here. */
static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
{
    virtio_scsi_common_unrealize(dev, errp);
}
913
/* User-configurable properties; the feature-bit properties feed
 * host_features, merged in by virtio_scsi_get_features().
 */
static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, 1),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                                                  0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                                                  128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                                           VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                                                VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_END_OF_LIST(),
};
926
/* Migration state: only the common virtio device state; per-request
 * state is handled by the save_request/load_request callbacks.
 */
static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
936
/* Class init for the abstract common base: only get_config is shared by
 * all subclasses.
 */
static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
945
/* Class init for the concrete virtio-scsi device: wire up properties,
 * migration state, virtio callbacks, and the hotplug handler interface.
 */
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->props = virtio_scsi_properties;
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}
963
/* Abstract base type shared by virtio-scsi and vhost-scsi variants. */
static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};
971
/* Concrete virtio-scsi device type; acts as its own hotplug handler for
 * devices on its SCSI bus.
 */
static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .instance_init = virtio_scsi_instance_init,
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
983
/* Register both QOM types with the type system at module-init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)