/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "sysemu/block-backend.h"
#include "hw/scsi/scsi.h"
#include "block/scsi.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

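/*
 * virtio-scsi addresses devices with an 8-byte LUN field: byte 0 must be 1,
 * byte 1 is the target id, and bytes 2-3 carry the LUN in the same flat-space
 * encoding used for REPORT LUNS (bit 0x40 set in byte 2 for LUNs >= 256).
 * For example, target 2, LUN 300 is encoded as { 1, 2, 0x41, 0x2c, 0, ... }.
 */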
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

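/*
 * Prepare a request that was just popped from a virtqueue.  Only the tail of
 * the structure (everything after resp_iov) is zeroed, so the virtqueue
 * element stored at the start of the request and the fields set up here
 * survive.
 */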
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_scsi_dataplane_notify(vdev, req);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(void)
{
    error_report("wrong size for virtio-scsi headers");
    exit(1);
}

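/*
 * Append the remainder of an iovec array to the request's scatter/gather
 * list, skipping the first "skip" bytes (they hold the virtio-scsi header
 * rather than data).  Returns the number of data bytes added.
 */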
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

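/*
 * Split a popped virtqueue element into the fixed-size request/response
 * headers and the data payload.  The request header is copied out of the
 * device-readable buffers, the response header is mapped into resp_iov, and
 * whatever follows the headers becomes the data scatter/gather list.  Data
 * may flow in only one direction; a request with both readable and writable
 * payload is rejected with -ENOTSUP.
 */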
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

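/*
 * Per-request migration hooks.  Only the queue index and the virtqueue
 * element are saved; the index is stored relative to the first command queue
 * (virtqueue 0 is the control queue and virtqueue 1 the event queue, so
 * command queues start at index 2).
 */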
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - 2;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(f, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

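/*
 * Each asynchronous cancellation triggered by a task management function
 * holds one unit of tmf_req->remaining.  The TMF response is completed only
 * when the counter drops to zero, i.e. when the last cancelled request has
 * actually finished.
 */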
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        virtio_scsi_complete_req(n->tmf_req);
    }
    g_free(n);
}

/* Return 0 if the request is ready to be completed and returned to the guest;
 * return -EINPROGRESS if the request has been submitted and will be completed
 * later, in the case of asynchronous cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    BusChild *kid;
    int target;
    int ret = 0;

    if (s->dataplane_started && d) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    virtio_tswap32s(VIRTIO_DEVICE(s), &req->req.tmf.subtype);
    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet; the
             * loop above only stops at requests that still have
             * hba_private set.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        s->resetting++;
        qdev_reset_all(&d->qdev);
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        s->resetting++;
        QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
            d = SCSI_DEVICE(kid->child);
            if (d->channel == 0 && d->id == target) {
                qdev_reset_all(&d->qdev);
            }
        }
        s->resetting--;
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    return ret;
}

static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req();
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req();
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req();
        } else {
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_scsi_dataplane_start(s);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_handle_ctrl_vq(s, vq);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

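/*
 * Completion callback from the SCSI layer: translate the SCSI status and
 * residual byte count into the virtio-scsi response, and copy any sense data
 * into the buffer that follows the response header (resp.cmd.sense_len tells
 * the guest how many bytes are valid).
 */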
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
                                         size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (req->dev->resetting) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}

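/*
 * Command handling is split in two phases.  The "prepare" phase parses the
 * request, looks up the addressed device and creates the SCSIRequest (also
 * plugging the block backend so that I/O can be batched); the "submit" phase
 * then enqueues the request and unplugs.  virtio_scsi_handle_cmd_vq collects
 * all prepared requests first so their I/O is submitted as one batch.
 */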
static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = &s->parent_obj;
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
        } else {
            virtio_scsi_bad_req();
        }
        return false;
    }

    d = virtio_scsi_device_find(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return false;
    }
    if (s->dataplane_started) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        return false;
    }
    scsi_req_ref(req->sreq);
    blk_io_plug(d->conf.blk);
    return true;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    blk_io_unplug(sreq->dev->conf.blk);
    scsi_req_unref(sreq);
}

void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    while ((req = virtio_scsi_pop_req(s, vq))) {
        if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
            QTAILQ_INSERT_TAIL(&reqs, req, next);
        }
    }

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (s->ctx) {
        virtio_scsi_dataplane_start(s);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_handle_cmd_vq(s, vq);
}

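/*
 * Fill in the guest-visible configuration space.  seg_max is hard-coded to
 * 128 - 2: the virtqueue size used below (VIRTIO_SCSI_VQ_SIZE) minus the two
 * descriptors taken by the request and response headers, so a maximally
 * fragmented data buffer still fits in a single virtqueue element.
 */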
static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max, 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        error_report("bad data written to virtio-scsi configuration space");
        exit(1);
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* First, report all the features that the device supports. */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (s->ctx) {
        virtio_scsi_dataplane_stop(s);
    }
    s->resetting++;
    qbus_reset_all(&s->bus.qbus);
    s->resetting--;

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}

/* The device does not have anything to save beyond the virtio data.
 * Request data is saved with callbacks from SCSI devices.
 */
static void virtio_scsi_save(QEMUFile *f, void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    virtio_save(vdev, f);
}

static int virtio_scsi_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    int ret;

    ret = virtio_load(vdev, f, version_id);
    if (ret) {
        return ret;
    }
    return 0;
}

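/*
 * Report an event (transport reset, parameter change, ...) on the event
 * virtqueue.  If the guest has not queued a buffer, the event is dropped and
 * the fact is recorded so that a VIRTIO_SCSI_T_EVENTS_MISSED notification can
 * be delivered with the next available buffer.
 */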
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                            uint32_t event, uint32_t reason)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    if (s->dataplane_started) {
        assert(s->ctx);
        aio_context_acquire(s->ctx);
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        goto out;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req();
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (!dev) {
        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
    } else {
        evt->lun[0] = 1;
        evt->lun[1] = dev->id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (dev->lun >= 256) {
            evt->lun[2] = (dev->lun >> 8) | 0x40;
        }
        evt->lun[3] = dev->lun & 0xFF;
    }
    virtio_scsi_complete_req(req);
out:
    if (s->dataplane_started) {
        aio_context_release(s->ctx);
    }
}

void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (s->ctx) {
        virtio_scsi_dataplane_start(s);
        if (!s->dataplane_fenced) {
            return;
        }
    }
    virtio_scsi_handle_event_vq(s, vq);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                               sense.asc | (sense.ascq << 8));
    }
}

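/*
 * Hotplug handler: when a dataplane iothread is configured, move the new
 * device's block backend into that AioContext before use; then, if the guest
 * negotiated VIRTIO_SCSI_F_HOTPLUG, tell it to rescan the bus with a
 * TRANSPORT_RESET/RESCAN event.
 */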
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);

    if (s->ctx && !s->dataplane_fenced) {
        if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            return;
        }
        aio_context_acquire(s->ctx);
        blk_set_aio_context(sd->conf.blk, s->ctx);
        aio_context_release(s->ctx);
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_RESCAN);
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, sd,
                               VIRTIO_SCSI_T_TRANSPORT_RESET,
                               VIRTIO_SCSI_EVT_RESET_REMOVED);
    }

    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
};

void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                                HandleOutput ctrl, HandleOutput evt,
                                HandleOutput cmd)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == 0 ||
        s->conf.num_queues > VIRTIO_QUEUE_MAX - 2) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues, VIRTIO_QUEUE_MAX - 2);
        virtio_cleanup(vdev);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
    s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
    }

    if (s->conf.iothread) {
        virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    static int virtio_scsi_id;
    Error *err = NULL;

    virtio_scsi_common_realize(dev, &err, virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), dev,
                 &virtio_scsi_scsi_info, vdev->bus_name);
    /* Override the default SCSI bus hotplug handler with virtio-scsi's own. */
    qbus_set_hotplug_handler(BUS(&s->bus), dev, &error_abort);

    if (!dev->hotplugged) {
        scsi_bus_legacy_handle_cmdline(&s->bus, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    register_savevm(dev, "virtio-scsi", virtio_scsi_id++, 1,
                    virtio_scsi_save, virtio_scsi_load, s);
}

static void virtio_scsi_instance_init(Object *obj)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(obj);

    object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
                             (Object **)&vs->conf.iothread,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE, &error_abort);
}

void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);

    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    unregister_savevm(dev, "virtio-scsi", s);
    virtio_scsi_common_unrealize(dev, errp);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, 1),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->props = virtio_scsi_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .instance_init = virtio_scsi_instance_init,
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)