]> git.proxmox.com Git - mirror_qemu.git/blob - hw/scsi/virtio-scsi.c
virtio-scsi: clean up virtio_scsi_handle_event_vq()
[mirror_qemu.git] / hw / scsi / virtio-scsi.c
1 /*
2 * Virtio SCSI HBA
3 *
4 * Copyright IBM, Corp. 2010
5 * Copyright Red Hat, Inc. 2011
6 *
7 * Authors:
8 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "standard-headers/linux/virtio_ids.h"
19 #include "hw/virtio/virtio-scsi.h"
20 #include "migration/qemu-file-types.h"
21 #include "qemu/error-report.h"
22 #include "qemu/iov.h"
23 #include "qemu/module.h"
24 #include "sysemu/block-backend.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/scsi/scsi.h"
27 #include "scsi/constants.h"
28 #include "hw/virtio/virtio-bus.h"
29 #include "hw/virtio/virtio-access.h"
30 #include "trace.h"
31
/* Decode the 14-bit LUN number from bytes 2-3 of the 8-byte virtio LUN field. */
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    int encoded = (lun[2] << 8) | lun[3];

    return encoded & 0x3FFF;
}
36
37 static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
38 {
39 if (lun[0] != 1) {
40 return NULL;
41 }
42 if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
43 return NULL;
44 }
45 return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
46 }
47
/*
 * Initialize a freshly popped VirtIOSCSIReq: attach it to its queue and
 * device, set up its scatter/gather list and response iovec, and zero
 * every field that follows resp_iov in the struct layout.
 */
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    /* Offset of the first field that must be zeroed; everything up to and
     * including resp_iov (vq, dev, qsgl, resp_iov) is initialized explicitly
     * below, so only the tail of the struct is memset.
     */
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}
60
61 void virtio_scsi_free_req(VirtIOSCSIReq *req)
62 {
63 qemu_iovec_destroy(&req->resp_iov);
64 qemu_sglist_destroy(&req->qsgl);
65 g_free(req);
66 }
67
/*
 * Copy the response header into the guest buffer, push the element back
 * onto the virtqueue, notify the guest, and free the request.  Consumes
 * the request and drops the reference on any attached SCSIRequest.
 */
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    /* With dataplane active (and not fenced) the irqfd path must be used. */
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        /* Break the back-pointer before unref so the SCSI layer cannot
         * reach the request we are about to free.
         */
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}
88
/*
 * Handle a malformed request: mark the device broken, return the element
 * to the virtqueue without reporting any bytes written, and free the
 * request.
 */
static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
95
96 static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
97 hwaddr *addr, int num, size_t skip)
98 {
99 QEMUSGList *qsgl = &req->qsgl;
100 size_t copied = 0;
101
102 while (num) {
103 if (skip >= iov->iov_len) {
104 skip -= iov->iov_len;
105 } else {
106 qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
107 copied += iov->iov_len - skip;
108 skip = 0;
109 }
110 iov++;
111 addr++;
112 num--;
113 }
114
115 assert(skip == 0);
116 return copied;
117 }
118
/*
 * Parse a popped virtqueue element into the request: copy in the request
 * header, map the response header, and build the data scatter/gather
 * list.  Returns 0 on success, -EINVAL if the headers do not fit, or
 * -ENOTSUP for bidirectional transfers (not supported).
 */
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    /* The request header must be fully present in the driver-writable sg. */
    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    /* Map (not copy) the response header area so it can be filled later. */
    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    /* Everything past the headers is the data payload. */
    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}
176
177 static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
178 {
179 VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
180 VirtIOSCSIReq *req;
181
182 req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
183 if (!req) {
184 return NULL;
185 }
186 virtio_scsi_init_req(s, vq, req);
187 return req;
188 }
189
/*
 * Migration hook: serialize the virtqueue element of an in-flight
 * request, preceded by the index of the command queue it came from
 * (relative to the fixed ctrl/event queues).
 */
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}
201
/*
 * Migration hook: rebuild a VirtIOSCSIReq from the stream written by
 * virtio_scsi_save_request() and re-parse it.  Exits on malformed
 * migration data since the guest state cannot be recovered.
 */
static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    /* Re-link the SCSI request; the transfer mode must match what the
     * re-parsed element describes.
     */
    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
230
/* Per-cancellation bookkeeping used to complete a TMF request once all of
 * the SCSI requests it targets have finished cancelling.
 */
typedef struct {
    Notifier notifier;       /* invoked when one cancellation completes */
    VirtIOSCSIReq *tmf_req;  /* the TMF request being accounted for */
} VirtIOSCSICancelNotifier;
235
/*
 * Called once per cancelled SCSI request.  Decrements the TMF request's
 * outstanding-cancellation counter and completes the TMF when it reaches
 * zero.  Frees the notifier allocated by virtio_scsi_do_tmf().
 */
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}
251
252 static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
253 {
254 if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
255 assert(blk_get_aio_context(d->conf.blk) == s->ctx);
256 }
257 }
258
/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* Find the request whose tag matches the one in the TMF. */
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                /* ABORT TASK: the TMF completes asynchronously from the
                 * cancel notifier once the request has been cancelled.
                 */
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        /* resetting suppresses VIRTIO_SCSI_S_ABORTED in favor of _S_RESET
         * for requests cancelled by the reset (see
         * virtio_scsi_request_cancelled).
         */
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        /* Drop the guard reference added above; if cancellations are still
         * outstanding the TMF completes later from the notifier.
         */
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        /* Reset every device on the addressed target. */
        target = req->req.tmf.lun[1];
        s->resetting++;

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                qdev_reset_all(&d1->qdev);
            }
        }
        rcu_read_unlock();

        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}
410
411 static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
412 {
413 VirtIODevice *vdev = (VirtIODevice *)s;
414 uint32_t type;
415 int r = 0;
416
417 if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
418 &type, sizeof(type)) < sizeof(type)) {
419 virtio_scsi_bad_req(req);
420 return;
421 }
422
423 virtio_tswap32s(vdev, &type);
424 if (type == VIRTIO_SCSI_T_TMF) {
425 if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
426 sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
427 virtio_scsi_bad_req(req);
428 return;
429 } else {
430 r = virtio_scsi_do_tmf(s, req);
431 }
432
433 } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
434 type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
435 if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
436 sizeof(VirtIOSCSICtrlANResp)) < 0) {
437 virtio_scsi_bad_req(req);
438 return;
439 } else {
440 req->req.an.event_requested =
441 virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
442 trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
443 req->req.an.event_requested);
444 req->resp.an.event_actual = 0;
445 req->resp.an.response = VIRTIO_SCSI_S_OK;
446 }
447 }
448 if (r == 0) {
449 if (type == VIRTIO_SCSI_T_TMF)
450 trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
451 req->req.tmf.tag,
452 req->resp.tmf.response);
453 else if (type == VIRTIO_SCSI_T_AN_QUERY ||
454 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
455 trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
456 req->resp.an.response);
457 virtio_scsi_complete_req(req);
458 } else {
459 assert(r == -EINPROGRESS);
460 }
461 }
462
463 bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
464 {
465 VirtIOSCSIReq *req;
466 bool progress = false;
467
468 while ((req = virtio_scsi_pop_req(s, vq))) {
469 progress = true;
470 virtio_scsi_handle_ctrl_req(s, req);
471 }
472 return progress;
473 }
474
475 /*
476 * If dataplane is configured but not yet started, do so now and return true on
477 * success.
478 *
479 * Dataplane is started by the core virtio code but virtqueue handler functions
480 * can also be invoked when a guest kicks before DRIVER_OK, so this helper
481 * function helps us deal with manually starting ioeventfd in that case.
482 */
483 static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
484 {
485 if (!s->ctx || s->dataplane_started) {
486 return false;
487 }
488
489 virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
490 return !s->dataplane_fenced;
491 }
492
493 static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
494 {
495 VirtIOSCSI *s = (VirtIOSCSI *)vdev;
496
497 if (virtio_scsi_defer_to_dataplane(s)) {
498 return;
499 }
500
501 virtio_scsi_acquire(s);
502 virtio_scsi_handle_ctrl_vq(s, vq);
503 virtio_scsi_release(s);
504 }
505
/*
 * Complete a command-queue request, tracing the response.  Truncates the
 * response size to the fixed header before completion.
 */
static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}
518
519 static void virtio_scsi_command_failed(SCSIRequest *r)
520 {
521 VirtIOSCSIReq *req = r->hba_private;
522
523 if (r->io_canceled) {
524 return;
525 }
526
527 req->resp.cmd.status = GOOD;
528 switch (r->host_status) {
529 case SCSI_HOST_NO_LUN:
530 req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
531 break;
532 case SCSI_HOST_BUSY:
533 req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
534 break;
535 case SCSI_HOST_TIME_OUT:
536 case SCSI_HOST_ABORTED:
537 req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
538 break;
539 case SCSI_HOST_BAD_RESPONSE:
540 req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
541 break;
542 case SCSI_HOST_RESET:
543 req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
544 break;
545 case SCSI_HOST_TRANSPORT_DISRUPTED:
546 req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
547 break;
548 case SCSI_HOST_TARGET_FAILURE:
549 req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
550 break;
551 case SCSI_HOST_RESERVATION_ERROR:
552 req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
553 break;
554 case SCSI_HOST_ALLOCATION_FAILURE:
555 case SCSI_HOST_MEDIUM_ERROR:
556 case SCSI_HOST_ERROR:
557 default:
558 req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
559 break;
560 }
561 virtio_scsi_complete_cmd_req(req);
562 }
563
/*
 * SCSI bus "complete" callback: fill in the virtio response (status,
 * residual, and sense data on CHECK CONDITION) and complete the request.
 * Cancelled requests are completed from the cancellation path instead.
 */
static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        /* The sense buffer cannot exceed the guest-provided response area. */
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        /* Sense goes straight into the guest buffer, right after the fixed
         * header (see virtio_scsi_complete_cmd_req).
         */
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}
589
590 static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
591 uint8_t *buf, void *hba_private)
592 {
593 VirtIOSCSIReq *req = hba_private;
594
595 if (cmd->len == 0) {
596 cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
597 memcpy(cmd->buf, buf, cmd->len);
598 }
599
600 /* Extract the direction and mode directly from the request, for
601 * host device passthrough.
602 */
603 cmd->xfer = req->qsgl.size;
604 cmd->mode = req->mode;
605 return 0;
606 }
607
608 static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
609 {
610 VirtIOSCSIReq *req = r->hba_private;
611
612 return &req->qsgl;
613 }
614
615 static void virtio_scsi_request_cancelled(SCSIRequest *r)
616 {
617 VirtIOSCSIReq *req = r->hba_private;
618
619 if (!req) {
620 return;
621 }
622 if (req->dev->resetting) {
623 req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
624 } else {
625 req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
626 }
627 virtio_scsi_complete_cmd_req(req);
628 }
629
/* Complete a command request with a generic FAILURE response. */
static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
635
/*
 * Parse and validate a command request and create its SCSIRequest.
 * Returns 0 when the request is ready to be submitted; on any error the
 * request has already been completed or freed.  -EINVAL specifically
 * means the device is broken and processing must stop.
 */
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            /* Bidirectional transfer: fail just this request. */
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            /* Malformed headers: mark the whole device broken. */
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    /* The transfer implied by the CDB must agree with (and fit within)
     * the buffers the guest actually provided.
     */
    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    /* Extra reference held until virtio_scsi_handle_cmd_req_submit();
     * blk_io_plug batches the I/O of the upcoming submission.
     */
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    object_unref(OBJECT(d));
    return 0;
}
680
/*
 * Submit a prepared request: enqueue it on the SCSI bus, kick off the
 * data transfer if one is needed, unplug the batched I/O, and drop the
 * reference taken in virtio_scsi_handle_cmd_req_prepare().
 */
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    /* A non-zero enqueue result means a data transfer should start now. */
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}
690
/*
 * Drain the command queue: prepare every available request with guest
 * notifications suppressed, then submit the batch.  Returns true if any
 * request was processed.  On -EINVAL (broken device) all prepared-but-
 * unsubmitted requests are unwound and processing stops.
 */
bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);
    bool progress = false;

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        /* Avoid a notification storm while we are busy draining. */
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            progress = true;
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
        /* Re-check after re-enabling notifications to close the race with
         * a guest kick that arrived while they were off.
         */
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
    return progress;
}
733
734 static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
735 {
736 /* use non-QOM casts in the data path */
737 VirtIOSCSI *s = (VirtIOSCSI *)vdev;
738
739 if (virtio_scsi_defer_to_dataplane(s)) {
740 return;
741 }
742
743 virtio_scsi_acquire(s);
744 virtio_scsi_handle_cmd_vq(s, vq);
745 virtio_scsi_release(s);
746 }
747
/* Fill the virtio config space presented to the guest. */
static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    /* Two descriptors are reserved, hence the "- 2" on seg_max. */
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
766
767 static void virtio_scsi_set_config(VirtIODevice *vdev,
768 const uint8_t *config)
769 {
770 VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
771 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
772
773 if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
774 (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
775 virtio_error(vdev,
776 "bad data written to virtio-scsi configuration space");
777 return;
778 }
779
780 vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
781 vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
782 }
783
784 static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
785 uint64_t requested_features,
786 Error **errp)
787 {
788 VirtIOSCSI *s = VIRTIO_SCSI(vdev);
789
790 /* Firstly sync all virtio-scsi possible supported features */
791 requested_features |= s->host_features;
792 return requested_features;
793 }
794
/*
 * Device reset: reset every device on the SCSI bus and restore the
 * guest-writable configuration to its defaults.  Must not run while
 * dataplane is active.
 */
static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);
    /* resetting makes cancelled requests report VIRTIO_SCSI_S_RESET. */
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}
809
/*
 * Emit an event on the event queue.  If no buffer is available the event
 * is dropped and events_dropped is latched so a later event (or event
 * queue kick) reports VIRTIO_SCSI_T_EVENTS_MISSED to the guest.  A NULL
 * @dev is only valid for the EVENTS_MISSED pseudo-event.
 */
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* No events before the driver is ready. */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    /* Piggy-back the missed-events flag on this event and clear it. */
    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    /* Events carry no request payload, hence req_size == 0. */
    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}
858
859 static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
860 {
861 if (s->events_dropped) {
862 virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
863 }
864 }
865
866 static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
867 {
868 VirtIOSCSI *s = VIRTIO_SCSI(vdev);
869
870 if (virtio_scsi_defer_to_dataplane(s)) {
871 return;
872 }
873
874 virtio_scsi_acquire(s);
875 virtio_scsi_handle_event_vq(s, vq);
876 virtio_scsi_release(s);
877 }
878
/*
 * SCSI bus "change" callback: forward a unit-attention condition to the
 * guest as a PARAM_CHANGE event when the CHANGE feature was negotiated.
 * ROM devices are excluded.
 */
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_acquire(s);
        /* Reason encodes the ASC/ASCQ pair of the sense condition. */
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
        virtio_scsi_release(s);
    }
}
892
/* Pre-plug hook: advertise that this HBA can run devices in an iothread. */
static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}
899
/*
 * Plug hook: move the new device's BlockBackend into the dataplane
 * AioContext (when dataplane is in use) and, if the HOTPLUG feature was
 * negotiated, notify the guest with a TRANSPORT_RESET/RESCAN event.
 */
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *old_context;
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        /* blk_set_aio_context must be called with the old context held. */
        old_context = blk_get_aio_context(sd->conf.blk);
        aio_context_acquire(old_context);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        aio_context_release(old_context);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        virtio_scsi_release(s);
    }
}
930
/*
 * Unplug hook: notify the guest (TRANSPORT_RESET/REMOVED) when HOTPLUG
 * was negotiated, unplug the device, and move its BlockBackend back to
 * the main loop AioContext.
 */
static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        virtio_scsi_release(s);
    }

    /* Keep ioeventfd handlers from racing with the unplug. */
    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}
958
/* SCSI bus callbacks and limits exported by this HBA. */
static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};
974
/*
 * Shared realize for virtio-scsi variants: validate the queue
 * configuration and create the control, event, and command virtqueues
 * with the supplied handlers.  Sets @errp and cleans up on failure.
 */
void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    /* Auto defaults to a single request queue. */
    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    /* seg_max is virtqueue_size - 2, so sizes of 2 or less are unusable. */
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}
1015
/*
 * Realize the concrete virtio-scsi device: create the virtqueues, set up
 * the SCSI bus with this device as hotplug handler, and configure
 * dataplane if an iothread was given.
 */
static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                        &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}
1039
1040 void virtio_scsi_common_unrealize(DeviceState *dev)
1041 {
1042 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1043 VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
1044 int i;
1045
1046 virtio_delete_queue(vs->ctrl_vq);
1047 virtio_delete_queue(vs->event_vq);
1048 for (i = 0; i < vs->conf.num_queues; i++) {
1049 virtio_delete_queue(vs->cmd_vqs[i]);
1050 }
1051 g_free(vs->cmd_vqs);
1052 virtio_cleanup(vdev);
1053 }
1054
/* Unrealize: detach the hotplug handler, then tear down the common state. */
static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}
1062
/* QOM properties of the virtio-scsi device; defaults shown inline. */
static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};
1082
/* Migration state: only the generic virtio device state is serialized. */
static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
1092
1093 static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
1094 {
1095 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1096 DeviceClass *dc = DEVICE_CLASS(klass);
1097
1098 vdc->get_config = virtio_scsi_get_config;
1099 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1100 }
1101
/* Class init for the concrete virtio-scsi device: wire up all callbacks. */
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    /* Dataplane entry points (defined in virtio-scsi-dataplane.c). */
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}
1122
/* Abstract base type shared by virtio-scsi and vhost-scsi variants. */
static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};
1130
/* Concrete virtio-scsi device type; acts as hotplug handler for its bus. */
static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
1141
/* Register both QOM types at module-init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)