/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

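/*
 * Decode the 14-bit LUN value from bytes 2-3 of the 8-byte virtio-scsi
 * LUN field (single-level, flat addressing).
 */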
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

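/*
 * Look up the SCSIDevice addressed by an 8-byte virtio-scsi LUN field.
 * Returns NULL unless the field uses the expected "1, target, LUN" encoding.
 * The caller owns a reference and must drop it with object_unref().
 */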
static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

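/*
 * Prepare a request that was just popped from a virtqueue: attach it to the
 * queue and device, set up the SG list and response iovec, and zero every
 * field of VirtIOSCSIReq that follows resp_iov.
 */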
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

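/*
 * Write the response back to the guest, notify the virtqueue (through the
 * irqfd when dataplane is active), and release the request.
 */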
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}

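/*
 * Append the given iovec elements to the request's DMA scatter/gather list,
 * skipping the first 'skip' bytes (the virtio-scsi header).  Returns the
 * number of bytes added.
 */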
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

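/*
 * Copy the fixed-size request header out of the descriptor chain, map the
 * response header into req->resp_iov, and build the data scatter/gather list
 * from the remaining buffers.  Returns 0 on success, -EINVAL for a malformed
 * request and -ENOTSUP for bidirectional transfers.
 */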
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

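/* Pop the next request from a virtqueue and initialize it, or return NULL. */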
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

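/*
 * Called when an asynchronously cancelled request finishes; complete the
 * owning TMF request once the last outstanding cancellation has notified.
 * The notifier frees itself.
 */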
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}

/* Return 0 if the request is ready to be completed and returned to the
 * guest; -EINPROGRESS if the request has been submitted and will be
 * completed later, in the case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet; we
             * checked for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        s->resetting++;

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                qdev_reset_all(&d1->qdev);
            }
        }
        rcu_read_unlock();

        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}

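/*
 * Dispatch one control-queue request: task management functions are handed
 * to virtio_scsi_do_tmf(), asynchronous notification queries and
 * subscriptions are acknowledged immediately.
 */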
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;
    bool progress = false;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        progress = true;
        virtio_scsi_handle_ctrl_req(s, req);
    }
    return progress;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_ctrl_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

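/*
 * Completion callback from the SCSI layer: fill in status, residual count
 * and sense data, then return the response to the guest.
 */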
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
                                         size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (req->dev->resetting) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}

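/*
 * Parse a request from the command virtqueue and create the corresponding
 * SCSIRequest.  Returns 0 when the request is ready to be submitted, or a
 * negative errno when it has already been completed or failed.
 */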
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}

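/*
 * Drain the command virtqueue: prepare each request with guest notifications
 * suppressed, then submit the whole batch.  Returns true if any request was
 * processed.
 */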
bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);
    bool progress = false;

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            progress = true;
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    blk_io_unplug(req->sreq->dev->conf.blk);
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
    return progress;
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_cmd_vq(s, vq);
    virtio_scsi_release(s);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* First, sync all of the virtio-scsi features supported by the host */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}

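/*
 * Deliver an event on the event virtqueue.  If no buffer is available the
 * event is dropped and reported later via VIRTIO_SCSI_T_EVENTS_MISSED.
 */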
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
        return true;
    }
    return false;
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (s->ctx) {
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_acquire(s);
    virtio_scsi_handle_event_vq(s, vq);
    virtio_scsi_release(s);
}

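/*
 * Called by the SCSI layer when a device reports a parameter change through
 * sense data; forward it to the guest as a VIRTIO_SCSI_T_PARAM_CHANGE event
 * if VIRTIO_SCSI_F_CHANGE was negotiated.
 */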
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
        virtio_scsi_release(s);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        virtio_scsi_acquire(s);
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        virtio_scsi_release(s);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
        virtio_scsi_release(s);
    }
}

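/*
 * Unplug handler: report a TRANSPORT_RESET/REMOVED event if hotplug events
 * were negotiated, detach the device, and move its BlockBackend back to the
 * main loop AioContext.
 */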
static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    AioContext *ctx = s->ctx ?: qemu_get_aio_context();

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_acquire(s);
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
        virtio_scsi_release(s);
    }

    aio_disable_external(ctx);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
    aio_enable_external(ctx);

    if (s->ctx) {
        virtio_scsi_acquire(s);
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
        virtio_scsi_release(s);
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};

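/*
 * Realize code shared through VirtIOSCSICommon: validate the queue
 * configuration and create the control, event and command virtqueues.
 */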
void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
            s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), dev,
                 &virtio_scsi_scsi_info, vdev->bus_name);
    /* Override the default SCSI bus hotplug handler with virtio-scsi's own. */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)