/* hw/scsi/scsi-bus.c */
1 #include "qemu/osdep.h"
2 #include "hw/hw.h"
3 #include "qapi/error.h"
4 #include "qemu/error-report.h"
5 #include "qemu/module.h"
6 #include "qemu/option.h"
7 #include "hw/scsi/scsi.h"
8 #include "scsi/constants.h"
9 #include "hw/qdev.h"
10 #include "sysemu/block-backend.h"
11 #include "sysemu/blockdev.h"
12 #include "trace.h"
13 #include "sysemu/dma.h"
14 #include "qemu/cutils.h"
15
16 static char *scsibus_get_dev_path(DeviceState *dev);
17 static char *scsibus_get_fw_dev_path(DeviceState *dev);
18 static void scsi_req_dequeue(SCSIRequest *req);
19 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
20 static void scsi_target_free_buf(SCSIRequest *req);
21
22 static Property scsi_props[] = {
23 DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
24 DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
25 DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
26 DEFINE_PROP_END_OF_LIST(),
27 };
28
29 static void scsi_bus_class_init(ObjectClass *klass, void *data)
30 {
31 BusClass *k = BUS_CLASS(klass);
32 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
33
34 k->get_dev_path = scsibus_get_dev_path;
35 k->get_fw_dev_path = scsibus_get_fw_dev_path;
36 hc->unplug = qdev_simple_device_unplug_cb;
37 }
38
39 static const TypeInfo scsi_bus_info = {
40 .name = TYPE_SCSI_BUS,
41 .parent = TYPE_BUS,
42 .instance_size = sizeof(SCSIBus),
43 .class_init = scsi_bus_class_init,
44 .interfaces = (InterfaceInfo[]) {
45 { TYPE_HOTPLUG_HANDLER },
46 { }
47 }
48 };
49 static int next_scsi_bus;
50
51 static void scsi_device_realize(SCSIDevice *s, Error **errp)
52 {
53 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
54 if (sc->realize) {
55 sc->realize(s, errp);
56 }
57 }
58
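/*
 * Parse a CDB on behalf of an HBA: run the generic parser first, then let
 * the HBA's parse_cdb hook (if any) override the result.
 */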
59 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
60 void *hba_private)
61 {
62 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
63 int rc;
64
65 assert(cmd->len == 0);
66 rc = scsi_req_parse_cdb(dev, cmd, buf);
67 if (bus->info->parse_cdb) {
68 rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
69 }
70 return rc;
71 }
72
73 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
74 uint8_t *buf, void *hba_private)
75 {
76 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
77 if (sc->alloc_req) {
78 return sc->alloc_req(s, tag, lun, buf, hba_private);
79 }
80
81 return NULL;
82 }
83
84 void scsi_device_unit_attention_reported(SCSIDevice *s)
85 {
86 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
87 if (sc->unit_attention_reported) {
88 sc->unit_attention_reported(s);
89 }
90 }
91
92 /* Create a SCSI bus; devices are attached to it separately. */
93 void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
94 const SCSIBusInfo *info, const char *bus_name)
95 {
96 qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
97 bus->busnr = next_scsi_bus++;
98 bus->info = info;
99 qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
100 }
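
/*
 * Illustrative HBA usage (a sketch; the SCSIBusInfo callbacks and names
 * below are the caller's own, only the fields shown are assumed to exist):
 *
 *     static const SCSIBusInfo my_scsi_info = {
 *         .tcq = true,
 *         .max_target = 7,
 *         .max_lun = 0,
 *         .transfer_data = my_transfer_data,
 *         .complete = my_request_complete,
 *         .cancel = my_request_cancelled,
 *     };
 *
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(s), &my_scsi_info, NULL);
 */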
101
102 static void scsi_dma_restart_bh(void *opaque)
103 {
104 SCSIDevice *s = opaque;
105 SCSIRequest *req, *next;
106
107 qemu_bh_delete(s->bh);
108 s->bh = NULL;
109
110 aio_context_acquire(blk_get_aio_context(s->conf.blk));
111 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
112 scsi_req_ref(req);
113 if (req->retry) {
114 req->retry = false;
115 switch (req->cmd.mode) {
116 case SCSI_XFER_FROM_DEV:
117 case SCSI_XFER_TO_DEV:
118 scsi_req_continue(req);
119 break;
120 case SCSI_XFER_NONE:
121 scsi_req_dequeue(req);
122 scsi_req_enqueue(req);
123 break;
124 }
125 }
126 scsi_req_unref(req);
127 }
128 aio_context_release(blk_get_aio_context(s->conf.blk));
129 }
130
131 void scsi_req_retry(SCSIRequest *req)
132 {
133 /* No need to save a reference, because scsi_dma_restart_bh just
134 * looks at the request list. */
135 req->retry = true;
136 }
137
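/*
 * VM run-state change handler: when the guest resumes, schedule a bottom
 * half that retries any requests flagged with scsi_req_retry() while the
 * VM was stopped.
 */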
138 static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
139 {
140 SCSIDevice *s = opaque;
141
142 if (!running) {
143 return;
144 }
145 if (!s->bh) {
146 AioContext *ctx = blk_get_aio_context(s->conf.blk);
147 s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
148 qemu_bh_schedule(s->bh);
149 }
150 }
151
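/*
 * qdev realize hook: validate the channel/id/lun triple against the bus
 * limits, auto-assign a free target id or LUN when either is left at -1,
 * then realize the device and register the DMA-restart handler.
 */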
152 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
153 {
154 SCSIDevice *dev = SCSI_DEVICE(qdev);
155 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
156 SCSIDevice *d;
157 Error *local_err = NULL;
158
159 if (dev->channel > bus->info->max_channel) {
160 error_setg(errp, "bad scsi channel id: %d", dev->channel);
161 return;
162 }
163 if (dev->id != -1 && dev->id > bus->info->max_target) {
164 error_setg(errp, "bad scsi device id: %d", dev->id);
165 return;
166 }
167 if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
168 error_setg(errp, "bad scsi device lun: %d", dev->lun);
169 return;
170 }
171
172 if (dev->id == -1) {
173 int id = -1;
174 if (dev->lun == -1) {
175 dev->lun = 0;
176 }
177 do {
178 d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
179 } while (d && d->lun == dev->lun && id < bus->info->max_target);
180 if (d && d->lun == dev->lun) {
181 error_setg(errp, "no free target");
182 return;
183 }
184 dev->id = id;
185 } else if (dev->lun == -1) {
186 int lun = -1;
187 do {
188 d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
189 } while (d && d->lun == lun && lun < bus->info->max_lun);
190 if (d && d->lun == lun) {
191 error_setg(errp, "no free lun");
192 return;
193 }
194 dev->lun = lun;
195 } else {
196 d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
197 assert(d);
198 if (d->lun == dev->lun && dev != d) {
199 error_setg(errp, "lun already used by '%s'", d->qdev.id);
200 return;
201 }
202 }
203
204 QTAILQ_INIT(&dev->requests);
205 scsi_device_realize(dev, &local_err);
206 if (local_err) {
207 error_propagate(errp, local_err);
208 return;
209 }
210 dev->vmsentry = qemu_add_vm_change_state_handler(scsi_dma_restart_cb,
211 dev);
212 }
213
214 static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
215 {
216 SCSIDevice *dev = SCSI_DEVICE(qdev);
217
218 if (dev->vmsentry) {
219 qemu_del_vm_change_state_handler(dev->vmsentry);
220 }
221
222 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
223 blockdev_mark_auto_del(dev->conf.blk);
224 }
225
226 /* handle legacy '-drive if=scsi,...' cmd line args */
227 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
228 int unit, bool removable, int bootindex,
229 bool share_rw,
230 BlockdevOnError rerror,
231 BlockdevOnError werror,
232 const char *serial, Error **errp)
233 {
234 const char *driver;
235 char *name;
236 DeviceState *dev;
237 Error *err = NULL;
238
239 driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
240 dev = qdev_create(&bus->qbus, driver);
241 name = g_strdup_printf("legacy[%d]", unit);
242 object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
243 g_free(name);
244
245 qdev_prop_set_uint32(dev, "scsi-id", unit);
246 if (bootindex >= 0) {
247 object_property_set_int(OBJECT(dev), bootindex, "bootindex",
248 &error_abort);
249 }
250 if (object_property_find(OBJECT(dev), "removable", NULL)) {
251 qdev_prop_set_bit(dev, "removable", removable);
252 }
253 if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
254 qdev_prop_set_string(dev, "serial", serial);
255 }
256 qdev_prop_set_drive(dev, "drive", blk, &err);
257 if (err) {
258 error_propagate(errp, err);
259 object_unparent(OBJECT(dev));
260 return NULL;
261 }
262 object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
263 if (err != NULL) {
264 error_propagate(errp, err);
265 object_unparent(OBJECT(dev));
266 return NULL;
267 }
268
269 qdev_prop_set_enum(dev, "rerror", rerror);
270 qdev_prop_set_enum(dev, "werror", werror);
271
272 object_property_set_bool(OBJECT(dev), true, "realized", &err);
273 if (err != NULL) {
274 error_propagate(errp, err);
275 object_unparent(OBJECT(dev));
276 return NULL;
277 }
278 return SCSI_DEVICE(dev);
279 }
280
281 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
282 {
283 Location loc;
284 DriveInfo *dinfo;
285 int unit;
286
287 loc_push_none(&loc);
288 for (unit = 0; unit <= bus->info->max_target; unit++) {
289 dinfo = drive_get(IF_SCSI, bus->busnr, unit);
290 if (dinfo == NULL) {
291 continue;
292 }
293 qemu_opts_loc_restore(dinfo->opts);
294 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
295 unit, false, -1, false,
296 BLOCKDEV_ON_ERROR_AUTO,
297 BLOCKDEV_ON_ERROR_AUTO,
298 NULL, &error_fatal);
299 }
300 loc_pop(&loc);
301 }
302
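/* SCSIReqOps implementation for commands with an invalid field in the CDB,
 * e.g. a transfer length that does not fit in 32 bits. */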
303 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
304 {
305 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
306 scsi_req_complete(req, CHECK_CONDITION);
307 return 0;
308 }
309
310 static const struct SCSIReqOps reqops_invalid_field = {
311 .size = sizeof(SCSIRequest),
312 .send_command = scsi_invalid_field
313 };
314
315 /* SCSIReqOps implementation for commands with an invalid (unsupported) opcode. */
316
317 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
318 {
319 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
320 scsi_req_complete(req, CHECK_CONDITION);
321 return 0;
322 }
323
324 static const struct SCSIReqOps reqops_invalid_opcode = {
325 .size = sizeof(SCSIRequest),
326 .send_command = scsi_invalid_command
327 };
328
329 /* SCSIReqOps implementation for unit attention conditions. */
330
331 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
332 {
333 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
334 scsi_req_build_sense(req, req->dev->unit_attention);
335 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
336 scsi_req_build_sense(req, req->bus->unit_attention);
337 }
338 scsi_req_complete(req, CHECK_CONDITION);
339 return 0;
340 }
341
342 static const struct SCSIReqOps reqops_unit_attention = {
343 .size = sizeof(SCSIRequest),
344 .send_command = scsi_unit_attention
345 };
346
347 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
348 an invalid LUN. */
349
350 typedef struct SCSITargetReq SCSITargetReq;
351
352 struct SCSITargetReq {
353 SCSIRequest req;
354 int len;
355 uint8_t *buf;
356 int buf_len;
357 };
358
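/* Encode a LUN in REPORT LUNS format: peripheral addressing for LUNs below
 * 256, flat space addressing (0x40 in byte 0) for larger LUNs. */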
359 static void store_lun(uint8_t *outbuf, int lun)
360 {
361 if (lun < 256) {
362 outbuf[1] = lun;
363 return;
364 }
365 outbuf[1] = (lun & 255);
366 outbuf[0] = (lun >> 8) | 0x40;
367 }
368
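/*
 * Build the REPORT LUNS response: an 8-byte header followed by one 8-byte
 * entry per LUN on this channel/target. LUN 0 is always reported, even if
 * no device is attached there.
 */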
369 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
370 {
371 BusChild *kid;
372 int i, len, n;
373 int channel, id;
374 bool found_lun0;
375
376 if (r->req.cmd.xfer < 16) {
377 return false;
378 }
379 if (r->req.cmd.buf[2] > 2) {
380 return false;
381 }
382 channel = r->req.dev->channel;
383 id = r->req.dev->id;
384 found_lun0 = false;
385 n = 0;
386 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
387 DeviceState *qdev = kid->child;
388 SCSIDevice *dev = SCSI_DEVICE(qdev);
389
390 if (dev->channel == channel && dev->id == id) {
391 if (dev->lun == 0) {
392 found_lun0 = true;
393 }
394 n += 8;
395 }
396 }
397 if (!found_lun0) {
398 n += 8;
399 }
400
401 scsi_target_alloc_buf(&r->req, n + 8);
402
403 len = MIN(n + 8, r->req.cmd.xfer & ~7);
404 memset(r->buf, 0, len);
405 stl_be_p(&r->buf[0], n);
406 i = found_lun0 ? 8 : 16;
407 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
408 DeviceState *qdev = kid->child;
409 SCSIDevice *dev = SCSI_DEVICE(qdev);
410
411 if (dev->channel == channel && dev->id == id) {
412 store_lun(&r->buf[i], dev->lun);
413 i += 8;
414 }
415 }
416 assert(i == n + 8);
417 r->len = len;
418 return true;
419 }
420
421 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
422 {
423 assert(r->req.dev->lun != r->req.lun);
424
425 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
426
427 if (r->req.cmd.buf[1] & 0x2) {
428 /* Command support data - optional, not implemented */
429 return false;
430 }
431
432 if (r->req.cmd.buf[1] & 0x1) {
433 /* Vital product data */
434 uint8_t page_code = r->req.cmd.buf[2];
435 r->buf[r->len++] = page_code; /* this page */
436 r->buf[r->len++] = 0x00;
437
438 switch (page_code) {
439 case 0x00: /* Supported page codes, mandatory */
440 {
441 int pages;
442 pages = r->len++;
443 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
444 r->buf[pages] = r->len - pages - 1; /* number of pages */
445 break;
446 }
447 default:
448 return false;
449 }
450 /* done with EVPD */
451 assert(r->len < r->buf_len);
452 r->len = MIN(r->req.cmd.xfer, r->len);
453 return true;
454 }
455
456 /* Standard INQUIRY data */
457 if (r->req.cmd.buf[2] != 0) {
458 return false;
459 }
460
461 /* PAGE CODE == 0 */
462 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
463 memset(r->buf, 0, r->len);
464 if (r->req.lun != 0) {
465 r->buf[0] = TYPE_NO_LUN;
466 } else {
467 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
468 r->buf[2] = 5; /* Version */
469 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
470 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
471 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
472 memcpy(&r->buf[8], "QEMU ", 8);
473 memcpy(&r->buf[16], "QEMU TARGET ", 16);
474 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
475 }
476 return true;
477 }
478
479 static size_t scsi_sense_len(SCSIRequest *req)
480 {
481 if (req->dev->type == TYPE_SCANNER)
482 return SCSI_SENSE_LEN_SCANNER;
483 else
484 return SCSI_SENSE_LEN;
485 }
486
487 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
488 {
489 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
490 int fixed_sense = (req->cmd.buf[1] & 1) == 0;
491
492 if (req->lun != 0 &&
493 buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
494 scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
495 scsi_req_complete(req, CHECK_CONDITION);
496 return 0;
497 }
498 switch (buf[0]) {
499 case REPORT_LUNS:
500 if (!scsi_target_emulate_report_luns(r)) {
501 goto illegal_request;
502 }
503 break;
504 case INQUIRY:
505 if (!scsi_target_emulate_inquiry(r)) {
506 goto illegal_request;
507 }
508 break;
509 case REQUEST_SENSE:
510 scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
511 if (req->lun != 0) {
512 const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
513
514 r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
515 sense, fixed_sense);
516 } else {
517 r->len = scsi_device_get_sense(r->req.dev, r->buf,
518 MIN(req->cmd.xfer, r->buf_len),
519 fixed_sense);
520 }
521 if (r->req.dev->sense_is_ua) {
522 scsi_device_unit_attention_reported(req->dev);
523 r->req.dev->sense_len = 0;
524 r->req.dev->sense_is_ua = false;
525 }
526 break;
527 case TEST_UNIT_READY:
528 break;
529 default:
530 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
531 scsi_req_complete(req, CHECK_CONDITION);
532 return 0;
533 illegal_request:
534 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
535 scsi_req_complete(req, CHECK_CONDITION);
536 return 0;
537 }
538
539 if (!r->len) {
540 scsi_req_complete(req, GOOD);
541 }
542 return r->len;
543 }
544
545 static void scsi_target_read_data(SCSIRequest *req)
546 {
547 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
548 uint32_t n;
549
550 n = r->len;
551 if (n > 0) {
552 r->len = 0;
553 scsi_req_data(&r->req, n);
554 } else {
555 scsi_req_complete(&r->req, GOOD);
556 }
557 }
558
559 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
560 {
561 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
562
563 return r->buf;
564 }
565
566 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
567 {
568 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
569
570 r->buf = g_malloc(len);
571 r->buf_len = len;
572
573 return r->buf;
574 }
575
576 static void scsi_target_free_buf(SCSIRequest *req)
577 {
578 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
579
580 g_free(r->buf);
581 }
582
583 static const struct SCSIReqOps reqops_target_command = {
584 .size = sizeof(SCSITargetReq),
585 .send_command = scsi_target_send_command,
586 .read_data = scsi_target_read_data,
587 .get_buf = scsi_target_get_buf,
588 .free_req = scsi_target_free_buf,
589 };
590
591
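/*
 * Allocate a request of reqops->size bytes. Only the part of the structure
 * past the embedded sense buffer is zeroed; the earlier fields are filled
 * in explicitly below or by the caller.
 */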
592 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
593 uint32_t tag, uint32_t lun, void *hba_private)
594 {
595 SCSIRequest *req;
596 SCSIBus *bus = scsi_bus_from_device(d);
597 BusState *qbus = BUS(bus);
598 const int memset_off = offsetof(SCSIRequest, sense)
599 + sizeof(req->sense);
600
601 req = g_malloc(reqops->size);
602 memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
603 req->refcount = 1;
604 req->bus = bus;
605 req->dev = d;
606 req->tag = tag;
607 req->lun = lun;
608 req->hba_private = hba_private;
609 req->status = -1;
610 req->ops = reqops;
611 object_ref(OBJECT(d));
612 object_ref(OBJECT(qbus->parent));
613 notifier_list_init(&req->cancel_notifiers);
614 trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
615 return req;
616 }
617
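/*
 * Create a request for device @d, picking the right SCSIReqOps: pending
 * unit attention conditions and commands addressed to the wrong LUN (plus
 * REPORT LUNS and REQUEST SENSE with stored sense data) are handled by the
 * generic reqops above; everything else goes to the device's alloc_req hook.
 */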
618 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
619 uint8_t *buf, void *hba_private)
620 {
621 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
622 const SCSIReqOps *ops;
623 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
624 SCSIRequest *req;
625 SCSICommand cmd = { .len = 0 };
626 int ret;
627
628 if ((d->unit_attention.key == UNIT_ATTENTION ||
629 bus->unit_attention.key == UNIT_ATTENTION) &&
630 (buf[0] != INQUIRY &&
631 buf[0] != REPORT_LUNS &&
632 buf[0] != GET_CONFIGURATION &&
633 buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
634
635 /*
636 * If we already have a pending unit attention condition,
637 * report this one before triggering another one.
638 */
639 !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
640 ops = &reqops_unit_attention;
641 } else if (lun != d->lun ||
642 buf[0] == REPORT_LUNS ||
643 (buf[0] == REQUEST_SENSE && d->sense_len)) {
644 ops = &reqops_target_command;
645 } else {
646 ops = NULL;
647 }
648
649 if (ops != NULL || !sc->parse_cdb) {
650 ret = scsi_req_parse_cdb(d, &cmd, buf);
651 } else {
652 ret = sc->parse_cdb(d, &cmd, buf, hba_private);
653 }
654
655 if (ret != 0) {
656 trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
657 req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
658 } else {
659 assert(cmd.len != 0);
660 trace_scsi_req_parsed(d->id, lun, tag, buf[0],
661 cmd.mode, cmd.xfer);
662 if (cmd.lba != -1) {
663 trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
664 cmd.lba);
665 }
666
667 if (cmd.xfer > INT32_MAX) {
668 req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
669 } else if (ops) {
670 req = scsi_req_alloc(ops, d, tag, lun, hba_private);
671 } else {
672 req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
673 }
674 }
675
676 req->cmd = cmd;
677 req->resid = req->cmd.xfer;
678
679 switch (buf[0]) {
680 case INQUIRY:
681 trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
682 break;
683 case TEST_UNIT_READY:
684 trace_scsi_test_unit_ready(d->id, lun, tag);
685 break;
686 case REPORT_LUNS:
687 trace_scsi_report_luns(d->id, lun, tag);
688 break;
689 case REQUEST_SENSE:
690 trace_scsi_request_sense(d->id, lun, tag);
691 break;
692 default:
693 break;
694 }
695
696 return req;
697 }
698
699 uint8_t *scsi_req_get_buf(SCSIRequest *req)
700 {
701 return req->ops->get_buf(req);
702 }
703
704 static void scsi_clear_unit_attention(SCSIRequest *req)
705 {
706 SCSISense *ua;
707 if (req->dev->unit_attention.key != UNIT_ATTENTION &&
708 req->bus->unit_attention.key != UNIT_ATTENTION) {
709 return;
710 }
711
712 /*
713 * If an INQUIRY command enters the enabled command state,
714 * the device server shall not clear any unit attention condition;
715 * See also MMC-6, paragraphs 6.5 and 6.6.2.
716 */
717 if (req->cmd.buf[0] == INQUIRY ||
718 req->cmd.buf[0] == GET_CONFIGURATION ||
719 req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
720 return;
721 }
722
723 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
724 ua = &req->dev->unit_attention;
725 } else {
726 ua = &req->bus->unit_attention;
727 }
728
729 /*
730 * If a REPORT LUNS command enters the enabled command state, [...]
731 * the device server shall clear any pending unit attention condition
732 * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
733 */
734 if (req->cmd.buf[0] == REPORT_LUNS &&
735 !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
736 ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
737 return;
738 }
739
740 *ua = SENSE_CODE(NO_SENSE);
741 }
742
743 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
744 {
745 int ret;
746
747 assert(len >= 14);
748 if (!req->sense_len) {
749 return 0;
750 }
751
752 ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
753
754 /*
755 * FIXME: clearing unit attention conditions upon autosense should be done
756 * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
757 * (SAM-5, 5.14).
758 *
759 * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
760 * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
761 * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
762 */
763 if (req->dev->sense_is_ua) {
764 scsi_device_unit_attention_reported(req->dev);
765 req->dev->sense_len = 0;
766 req->dev->sense_is_ua = false;
767 }
768 return ret;
769 }
770
771 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
772 {
773 return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
774 }
775
776 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
777 {
778 trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
779 sense.key, sense.asc, sense.ascq);
780 req->sense_len = scsi_build_sense(req->sense, sense);
781 }
782
783 static void scsi_req_enqueue_internal(SCSIRequest *req)
784 {
785 assert(!req->enqueued);
786 scsi_req_ref(req);
787 if (req->bus->info->get_sg_list) {
788 req->sg = req->bus->info->get_sg_list(req);
789 } else {
790 req->sg = NULL;
791 }
792 req->enqueued = true;
793 QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
794 }
795
796 int32_t scsi_req_enqueue(SCSIRequest *req)
797 {
798 int32_t rc;
799
800 assert(!req->retry);
801 scsi_req_enqueue_internal(req);
802 scsi_req_ref(req);
803 rc = req->ops->send_command(req, req->cmd.buf);
804 scsi_req_unref(req);
805 return rc;
806 }
807
808 static void scsi_req_dequeue(SCSIRequest *req)
809 {
810 trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
811 req->retry = false;
812 if (req->enqueued) {
813 QTAILQ_REMOVE(&req->dev->requests, req, next);
814 req->enqueued = false;
815 scsi_req_unref(req);
816 }
817 }
818
819 static int scsi_get_performance_length(int num_desc, int type, int data_type)
820 {
821 /* MMC-6, paragraph 6.7. */
822 switch (type) {
823 case 0:
824 if ((data_type & 3) == 0) {
825 /* Each descriptor is as in Table 295 - Nominal performance. */
826 return 16 * num_desc + 8;
827 } else {
828 /* Each descriptor is as in Table 296 - Exceptions. */
829 return 6 * num_desc + 8;
830 }
831 case 1:
832 case 4:
833 case 5:
834 return 8 * num_desc + 8;
835 case 2:
836 return 2048 * num_desc + 8;
837 case 3:
838 return 16 * num_desc + 8;
839 default:
840 return 8;
841 }
842 }
843
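/* Decode the transfer unit for ATA PASS-THROUGH: bytes when BYTE_BLOCK is
 * clear, otherwise 512-byte sectors or device-sized blocks depending on
 * T_TYPE. */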
844 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
845 {
846 int byte_block = (buf[2] >> 2) & 0x1;
847 int type = (buf[2] >> 4) & 0x1;
848 int xfer_unit;
849
850 if (byte_block) {
851 if (type) {
852 xfer_unit = dev->blocksize;
853 } else {
854 xfer_unit = 512;
855 }
856 } else {
857 xfer_unit = 1;
858 }
859
860 return xfer_unit;
861 }
862
863 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
864 {
865 int length = buf[2] & 0x3;
866 int xfer;
867 int unit = ata_passthrough_xfer_unit(dev, buf);
868
869 switch (length) {
870 case 0:
871 case 3: /* USB-specific. */
872 default:
873 xfer = 0;
874 break;
875 case 1:
876 xfer = buf[3];
877 break;
878 case 2:
879 xfer = buf[4];
880 break;
881 }
882
883 return xfer * unit;
884 }
885
886 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
887 {
888 int extend = buf[1] & 0x1;
889 int length = buf[2] & 0x3;
890 int xfer;
891 int unit = ata_passthrough_xfer_unit(dev, buf);
892
893 switch (length) {
894 case 0:
895 case 3: /* USB-specific. */
896 default:
897 xfer = 0;
898 break;
899 case 1:
900 xfer = buf[4];
901 xfer |= (extend ? buf[3] << 8 : 0);
902 break;
903 case 2:
904 xfer = buf[6];
905 xfer |= (extend ? buf[5] << 8 : 0);
906 break;
907 }
908
909 return xfer * unit;
910 }
911
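/*
 * Work out the expected transfer length for a block-device CDB, starting
 * from the generic length field and adjusting for commands whose length is
 * expressed in blocks, in MMC-specific units, or not present at all.
 */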
912 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
913 {
914 cmd->xfer = scsi_cdb_xfer(buf);
915 switch (buf[0]) {
916 case TEST_UNIT_READY:
917 case REWIND:
918 case START_STOP:
919 case SET_CAPACITY:
920 case WRITE_FILEMARKS:
921 case WRITE_FILEMARKS_16:
922 case SPACE:
923 case RESERVE:
924 case RELEASE:
925 case ERASE:
926 case ALLOW_MEDIUM_REMOVAL:
927 case SEEK_10:
928 case SYNCHRONIZE_CACHE:
929 case SYNCHRONIZE_CACHE_16:
930 case LOCATE_16:
931 case LOCK_UNLOCK_CACHE:
932 case SET_CD_SPEED:
933 case SET_LIMITS:
934 case WRITE_LONG_10:
935 case UPDATE_BLOCK:
936 case RESERVE_TRACK:
937 case SET_READ_AHEAD:
938 case PRE_FETCH:
939 case PRE_FETCH_16:
940 case ALLOW_OVERWRITE:
941 cmd->xfer = 0;
942 break;
943 case VERIFY_10:
944 case VERIFY_12:
945 case VERIFY_16:
946 if ((buf[1] & 2) == 0) {
947 cmd->xfer = 0;
948 } else if ((buf[1] & 4) != 0) {
949 cmd->xfer = 1;
950 }
951 cmd->xfer *= dev->blocksize;
952 break;
953 case MODE_SENSE:
954 break;
955 case WRITE_SAME_10:
956 case WRITE_SAME_16:
957 cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
958 break;
959 case READ_CAPACITY_10:
960 cmd->xfer = 8;
961 break;
962 case READ_BLOCK_LIMITS:
963 cmd->xfer = 6;
964 break;
965 case SEND_VOLUME_TAG:
966 /* GPCMD_SET_STREAMING from multimedia commands. */
967 if (dev->type == TYPE_ROM) {
968 cmd->xfer = buf[10] | (buf[9] << 8);
969 } else {
970 cmd->xfer = buf[9] | (buf[8] << 8);
971 }
972 break;
973 case WRITE_6:
974 /* length 0 means 256 blocks */
975 if (cmd->xfer == 0) {
976 cmd->xfer = 256;
977 }
978 /* fall through */
979 case WRITE_10:
980 case WRITE_VERIFY_10:
981 case WRITE_12:
982 case WRITE_VERIFY_12:
983 case WRITE_16:
984 case WRITE_VERIFY_16:
985 cmd->xfer *= dev->blocksize;
986 break;
987 case READ_6:
988 case READ_REVERSE:
989 /* length 0 means 256 blocks */
990 if (cmd->xfer == 0) {
991 cmd->xfer = 256;
992 }
993 /* fall through */
994 case READ_10:
995 case READ_12:
996 case READ_16:
997 cmd->xfer *= dev->blocksize;
998 break;
999 case FORMAT_UNIT:
1000 /* MMC mandates the parameter list to be 12 bytes long. Parameters
1001 * for block devices are restricted to the header right now. */
1002 if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1003 cmd->xfer = 12;
1004 } else {
1005 cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1006 }
1007 break;
1008 case INQUIRY:
1009 case RECEIVE_DIAGNOSTIC:
1010 case SEND_DIAGNOSTIC:
1011 cmd->xfer = buf[4] | (buf[3] << 8);
1012 break;
1013 case READ_CD:
1014 case READ_BUFFER:
1015 case WRITE_BUFFER:
1016 case SEND_CUE_SHEET:
1017 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1018 break;
1019 case PERSISTENT_RESERVE_OUT:
1020 cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1021 break;
1022 case ERASE_12:
1023 if (dev->type == TYPE_ROM) {
1024 /* MMC command GET PERFORMANCE. */
1025 cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1026 buf[10], buf[1] & 0x1f);
1027 }
1028 break;
1029 case MECHANISM_STATUS:
1030 case READ_DVD_STRUCTURE:
1031 case SEND_DVD_STRUCTURE:
1032 case MAINTENANCE_OUT:
1033 case MAINTENANCE_IN:
1034 if (dev->type == TYPE_ROM) {
1035 /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1036 cmd->xfer = buf[9] | (buf[8] << 8);
1037 }
1038 break;
1039 case ATA_PASSTHROUGH_12:
1040 if (dev->type == TYPE_ROM) {
1041 /* BLANK command of MMC */
1042 cmd->xfer = 0;
1043 } else {
1044 cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1045 }
1046 break;
1047 case ATA_PASSTHROUGH_16:
1048 cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1049 break;
1050 }
1051 return 0;
1052 }
1053
1054 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1055 {
1056 switch (buf[0]) {
1057 /* stream commands */
1058 case ERASE_12:
1059 case ERASE_16:
1060 cmd->xfer = 0;
1061 break;
1062 case READ_6:
1063 case READ_REVERSE:
1064 case RECOVER_BUFFERED_DATA:
1065 case WRITE_6:
1066 cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1067 if (buf[1] & 0x01) { /* fixed */
1068 cmd->xfer *= dev->blocksize;
1069 }
1070 break;
1071 case READ_16:
1072 case READ_REVERSE_16:
1073 case VERIFY_16:
1074 case WRITE_16:
1075 cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1076 if (buf[1] & 0x01) { /* fixed */
1077 cmd->xfer *= dev->blocksize;
1078 }
1079 break;
1080 case REWIND:
1081 case LOAD_UNLOAD:
1082 cmd->xfer = 0;
1083 break;
1084 case SPACE_16:
1085 cmd->xfer = buf[13] | (buf[12] << 8);
1086 break;
1087 case READ_POSITION:
1088 switch (buf[1] & 0x1f) /* operation code */ {
1089 case SHORT_FORM_BLOCK_ID:
1090 case SHORT_FORM_VENDOR_SPECIFIC:
1091 cmd->xfer = 20;
1092 break;
1093 case LONG_FORM:
1094 cmd->xfer = 32;
1095 break;
1096 case EXTENDED_FORM:
1097 cmd->xfer = buf[8] | (buf[7] << 8);
1098 break;
1099 default:
1100 return -1;
1101 }
1102
1103 break;
1104 case FORMAT_UNIT:
1105 cmd->xfer = buf[4] | (buf[3] << 8);
1106 break;
1107 /* generic commands */
1108 default:
1109 return scsi_req_xfer(cmd, dev, buf);
1110 }
1111 return 0;
1112 }
1113
1114 static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1115 {
1116 switch (buf[0]) {
1117 /* medium changer commands */
1118 case EXCHANGE_MEDIUM:
1119 case INITIALIZE_ELEMENT_STATUS:
1120 case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1121 case MOVE_MEDIUM:
1122 case POSITION_TO_ELEMENT:
1123 cmd->xfer = 0;
1124 break;
1125 case READ_ELEMENT_STATUS:
1126 cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1127 break;
1128
1129 /* generic commands */
1130 default:
1131 return scsi_req_xfer(cmd, dev, buf);
1132 }
1133 return 0;
1134 }
1135
1136 static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1137 {
1138 switch (buf[0]) {
1139 /* Scanner commands */
1140 case OBJECT_POSITION:
1141 cmd->xfer = 0;
1142 break;
1143 case SCAN:
1144 cmd->xfer = buf[4];
1145 break;
1146 case READ_10:
1147 case SEND:
1148 case GET_WINDOW:
1149 case SET_WINDOW:
1150 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1151 break;
1152 default:
1153 /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1154 return scsi_req_xfer(cmd, dev, buf);
1155 }
1156
1157 return 0;
1158 }
1159
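/* Classify the parsed command as a host-to-device transfer, a device-to-host
 * transfer, or no data transfer at all. */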
1160 static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1161 {
1162 if (!cmd->xfer) {
1163 cmd->mode = SCSI_XFER_NONE;
1164 return;
1165 }
1166 switch (cmd->buf[0]) {
1167 case WRITE_6:
1168 case WRITE_10:
1169 case WRITE_VERIFY_10:
1170 case WRITE_12:
1171 case WRITE_VERIFY_12:
1172 case WRITE_16:
1173 case WRITE_VERIFY_16:
1174 case VERIFY_10:
1175 case VERIFY_12:
1176 case VERIFY_16:
1177 case COPY:
1178 case COPY_VERIFY:
1179 case COMPARE:
1180 case CHANGE_DEFINITION:
1181 case LOG_SELECT:
1182 case MODE_SELECT:
1183 case MODE_SELECT_10:
1184 case SEND_DIAGNOSTIC:
1185 case WRITE_BUFFER:
1186 case FORMAT_UNIT:
1187 case REASSIGN_BLOCKS:
1188 case SEARCH_EQUAL:
1189 case SEARCH_HIGH:
1190 case SEARCH_LOW:
1191 case UPDATE_BLOCK:
1192 case WRITE_LONG_10:
1193 case WRITE_SAME_10:
1194 case WRITE_SAME_16:
1195 case UNMAP:
1196 case SEARCH_HIGH_12:
1197 case SEARCH_EQUAL_12:
1198 case SEARCH_LOW_12:
1199 case MEDIUM_SCAN:
1200 case SEND_VOLUME_TAG:
1201 case SEND_CUE_SHEET:
1202 case SEND_DVD_STRUCTURE:
1203 case PERSISTENT_RESERVE_OUT:
1204 case MAINTENANCE_OUT:
1205 case SET_WINDOW:
1206 case SCAN:
1207 /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
1208 * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1209 */
1210 cmd->mode = SCSI_XFER_TO_DEV;
1211 break;
1212 case ATA_PASSTHROUGH_12:
1213 case ATA_PASSTHROUGH_16:
1214 /* T_DIR */
1215 cmd->mode = (cmd->buf[2] & 0x8) ?
1216 SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1217 break;
1218 default:
1219 cmd->mode = SCSI_XFER_FROM_DEV;
1220 break;
1221 }
1222 }
1223
1224 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
1225 {
1226 int rc;
1227 int len;
1228
1229 cmd->lba = -1;
1230 len = scsi_cdb_length(buf);
1231 if (len < 0) {
1232 return -1;
1233 }
1234
1235 cmd->len = len;
1236 switch (dev->type) {
1237 case TYPE_TAPE:
1238 rc = scsi_req_stream_xfer(cmd, dev, buf);
1239 break;
1240 case TYPE_MEDIUM_CHANGER:
1241 rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1242 break;
1243 case TYPE_SCANNER:
1244 rc = scsi_req_scanner_length(cmd, dev, buf);
1245 break;
1246 default:
1247 rc = scsi_req_xfer(cmd, dev, buf);
1248 break;
1249 }
1250
1251 if (rc != 0)
1252 return rc;
1253
1254 memcpy(cmd->buf, buf, cmd->len);
1255 scsi_cmd_xfer_mode(cmd);
1256 cmd->lba = scsi_cmd_lba(cmd);
1257 return 0;
1258 }
1259
1260 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1261 {
1262 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1263
1264 scsi_device_set_ua(dev, sense);
1265 if (bus->info->change) {
1266 bus->info->change(bus, dev, sense);
1267 }
1268 }
1269
1270 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1271 {
1272 assert(req->refcount > 0);
1273 req->refcount++;
1274 return req;
1275 }
1276
1277 void scsi_req_unref(SCSIRequest *req)
1278 {
1279 assert(req->refcount > 0);
1280 if (--req->refcount == 0) {
1281 BusState *qbus = req->dev->qdev.parent_bus;
1282 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1283
1284 if (bus->info->free_request && req->hba_private) {
1285 bus->info->free_request(bus, req->hba_private);
1286 }
1287 if (req->ops->free_req) {
1288 req->ops->free_req(req);
1289 }
1290 object_unref(OBJECT(req->dev));
1291 object_unref(OBJECT(qbus->parent));
1292 g_free(req);
1293 }
1294 }
1295
1296 /* Tell the device that we finished processing this chunk of I/O. It
1297 will start the next chunk or complete the command. */
1298 void scsi_req_continue(SCSIRequest *req)
1299 {
1300 if (req->io_canceled) {
1301 trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1302 return;
1303 }
1304 trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1305 if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1306 req->ops->write_data(req);
1307 } else {
1308 req->ops->read_data(req);
1309 }
1310 }
1311
1312 /* Called by the devices when data is ready for the HBA. The HBA should
1313 start a DMA operation to read or fill the device's data buffer.
1314 Once it completes, calling scsi_req_continue will restart I/O. */
1315 void scsi_req_data(SCSIRequest *req, int len)
1316 {
1317 uint8_t *buf;
1318 if (req->io_canceled) {
1319 trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1320 return;
1321 }
1322 trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1323 assert(req->cmd.mode != SCSI_XFER_NONE);
1324 if (!req->sg) {
1325 req->resid -= len;
1326 req->bus->info->transfer_data(req, len);
1327 return;
1328 }
1329
1330 /* If the device calls scsi_req_data and the HBA specified a
1331 * scatter/gather list, the transfer has to happen in a single
1332 * step. */
1333 assert(!req->dma_started);
1334 req->dma_started = true;
1335
1336 buf = scsi_req_get_buf(req);
1337 if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1338 req->resid = dma_buf_read(buf, len, req->sg);
1339 } else {
1340 req->resid = dma_buf_write(buf, len, req->sg);
1341 }
1342 scsi_req_continue(req);
1343 }
1344
1345 void scsi_req_print(SCSIRequest *req)
1346 {
1347 FILE *fp = stderr;
1348 int i;
1349
1350 fprintf(fp, "[%s id=%d] %s",
1351 req->dev->qdev.parent_bus->name,
1352 req->dev->id,
1353 scsi_command_name(req->cmd.buf[0]));
1354 for (i = 1; i < req->cmd.len; i++) {
1355 fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1356 }
1357 switch (req->cmd.mode) {
1358 case SCSI_XFER_NONE:
1359 fprintf(fp, " - none\n");
1360 break;
1361 case SCSI_XFER_FROM_DEV:
1362 fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1363 break;
1364 case SCSI_XFER_TO_DEV:
1365 fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1366 break;
1367 default:
1368 fprintf(fp, " - Oops\n");
1369 break;
1370 }
1371 }
1372
1373 void scsi_req_complete(SCSIRequest *req, int status)
1374 {
1375 assert(req->status == -1);
1376 req->status = status;
1377
1378 assert(req->sense_len <= sizeof(req->sense));
1379 if (status == GOOD) {
1380 req->sense_len = 0;
1381 }
1382
1383 if (req->sense_len) {
1384 memcpy(req->dev->sense, req->sense, req->sense_len);
1385 req->dev->sense_len = req->sense_len;
1386 req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1387 } else {
1388 req->dev->sense_len = 0;
1389 req->dev->sense_is_ua = false;
1390 }
1391
1392 /*
1393 * Unit attention state is now stored in the device's sense buffer
1394 * if the HBA didn't do autosense. Clear the pending unit attention
1395 * flags.
1396 */
1397 scsi_clear_unit_attention(req);
1398
1399 scsi_req_ref(req);
1400 scsi_req_dequeue(req);
1401 req->bus->info->complete(req, req->status, req->resid);
1402
1403 /* Cancelled requests might end up being completed instead of cancelled */
1404 notifier_list_notify(&req->cancel_notifiers, req);
1405 scsi_req_unref(req);
1406 }
1407
1408 /* Called by the devices when the request is canceled. */
1409 void scsi_req_cancel_complete(SCSIRequest *req)
1410 {
1411 assert(req->io_canceled);
1412 if (req->bus->info->cancel) {
1413 req->bus->info->cancel(req);
1414 }
1415 notifier_list_notify(&req->cancel_notifiers, req);
1416 scsi_req_unref(req);
1417 }
1418
1419 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1420 * notifier list; the bus will be notified once the request's cancellation
1421 * is complete.
1422 */
1423 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1424 {
1425 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1426 if (notifier) {
1427 notifier_list_add(&req->cancel_notifiers, notifier);
1428 }
1429 if (req->io_canceled) {
1430 /* A blk_aio_cancel_async is pending; when it finishes,
1431 * scsi_req_cancel_complete will be called and will
1432 * call the notifier we just added. Just wait for that.
1433 */
1434 assert(req->aiocb);
1435 return;
1436 }
1437 /* Dropped in scsi_req_cancel_complete. */
1438 scsi_req_ref(req);
1439 scsi_req_dequeue(req);
1440 req->io_canceled = true;
1441 if (req->aiocb) {
1442 blk_aio_cancel_async(req->aiocb);
1443 } else {
1444 scsi_req_cancel_complete(req);
1445 }
1446 }
1447
1448 void scsi_req_cancel(SCSIRequest *req)
1449 {
1450 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1451 if (!req->enqueued) {
1452 return;
1453 }
1454 assert(!req->io_canceled);
1455 /* Dropped in scsi_req_cancel_complete. */
1456 scsi_req_ref(req);
1457 scsi_req_dequeue(req);
1458 req->io_canceled = true;
1459 if (req->aiocb) {
1460 blk_aio_cancel(req->aiocb);
1461 } else {
1462 scsi_req_cancel_complete(req);
1463 }
1464 }
1465
1466 static int scsi_ua_precedence(SCSISense sense)
1467 {
1468 if (sense.key != UNIT_ATTENTION) {
1469 return INT_MAX;
1470 }
1471 if (sense.asc == 0x29 && sense.ascq == 0x04) {
1472 /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1473 return 1;
1474 } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1475 /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1476 return 2;
1477 } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1478 /* These two go with "all others". */
1479 ;
1480 } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1481 /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1482 * POWER ON OCCURRED = 1
1483 * SCSI BUS RESET OCCURRED = 2
1484 * BUS DEVICE RESET FUNCTION OCCURRED = 3
1485 * I_T NEXUS LOSS OCCURRED = 7
1486 */
1487 return sense.ascq;
1488 } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1489 /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
1490 return 8;
1491 }
1492 return (sense.asc << 8) | sense.ascq;
1493 }
1494
1495 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1496 {
1497 int prec1, prec2;
1498 if (sense.key != UNIT_ATTENTION) {
1499 return;
1500 }
1501 trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1502 sense.asc, sense.ascq);
1503
1504 /*
1505 * Override a pre-existing unit attention condition, except for a more
1506 * important reset condition.
1507 */
1508 prec1 = scsi_ua_precedence(sdev->unit_attention);
1509 prec2 = scsi_ua_precedence(sense);
1510 if (prec2 < prec1) {
1511 sdev->unit_attention = sense;
1512 }
1513 }
1514
1515 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1516 {
1517 SCSIRequest *req;
1518
1519 aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
1520 while (!QTAILQ_EMPTY(&sdev->requests)) {
1521 req = QTAILQ_FIRST(&sdev->requests);
1522 scsi_req_cancel_async(req, NULL);
1523 }
1524 blk_drain(sdev->conf.blk);
1525 aio_context_release(blk_get_aio_context(sdev->conf.blk));
1526 scsi_device_set_ua(sdev, sense);
1527 }
1528
1529 static char *scsibus_get_dev_path(DeviceState *dev)
1530 {
1531 SCSIDevice *d = SCSI_DEVICE(dev);
1532 DeviceState *hba = dev->parent_bus->parent;
1533 char *id;
1534 char *path;
1535
1536 id = qdev_get_dev_path(hba);
1537 if (id) {
1538 path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1539 } else {
1540 path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1541 }
1542 g_free(id);
1543 return path;
1544 }
1545
1546 static char *scsibus_get_fw_dev_path(DeviceState *dev)
1547 {
1548 SCSIDevice *d = SCSI_DEVICE(dev);
1549 return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1550 qdev_fw_name(dev), d->id, d->lun);
1551 }
1552
1553 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
1554 {
1555 BusChild *kid;
1556 SCSIDevice *target_dev = NULL;
1557
1558 QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
1559 DeviceState *qdev = kid->child;
1560 SCSIDevice *dev = SCSI_DEVICE(qdev);
1561
1562 if (dev->channel == channel && dev->id == id) {
1563 if (dev->lun == lun) {
1564 return dev;
1565 }
1566 target_dev = dev;
1567 }
1568 }
1569 return target_dev;
1570 }
1571
1572 /* SCSI request list. For simplicity, pv points to the whole device */
1573
1574 static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1575 const VMStateField *field, QJSON *vmdesc)
1576 {
1577 SCSIDevice *s = pv;
1578 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1579 SCSIRequest *req;
1580
1581 QTAILQ_FOREACH(req, &s->requests, next) {
1582 assert(!req->io_canceled);
1583 assert(req->status == -1);
1584 assert(req->enqueued);
1585
1586 qemu_put_sbyte(f, req->retry ? 1 : 2);
1587 qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1588 qemu_put_be32s(f, &req->tag);
1589 qemu_put_be32s(f, &req->lun);
1590 if (bus->info->save_request) {
1591 bus->info->save_request(f, req);
1592 }
1593 if (req->ops->save_request) {
1594 req->ops->save_request(f, req);
1595 }
1596 }
1597 qemu_put_sbyte(f, 0);
1598
1599 return 0;
1600 }
1601
1602 static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1603 const VMStateField *field)
1604 {
1605 SCSIDevice *s = pv;
1606 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1607 int8_t sbyte;
1608
1609 while ((sbyte = qemu_get_sbyte(f)) > 0) {
1610 uint8_t buf[SCSI_CMD_BUF_SIZE];
1611 uint32_t tag;
1612 uint32_t lun;
1613 SCSIRequest *req;
1614
1615 qemu_get_buffer(f, buf, sizeof(buf));
1616 qemu_get_be32s(f, &tag);
1617 qemu_get_be32s(f, &lun);
1618 req = scsi_req_new(s, tag, lun, buf, NULL);
1619 req->retry = (sbyte == 1);
1620 if (bus->info->load_request) {
1621 req->hba_private = bus->info->load_request(f, req);
1622 }
1623 if (req->ops->load_request) {
1624 req->ops->load_request(f, req);
1625 }
1626
1627 /* Just restart it later. */
1628 scsi_req_enqueue_internal(req);
1629
1630 /* At this point, the request will be kept alive by the reference
1631 * added by scsi_req_enqueue_internal, so we can release our reference.
1632 * The HBA of course will add its own reference in the load_request
1633 * callback if it needs to hold on the SCSIRequest.
1634 */
1635 scsi_req_unref(req);
1636 }
1637
1638 return 0;
1639 }
1640
1641 static const VMStateInfo vmstate_info_scsi_requests = {
1642 .name = "scsi-requests",
1643 .get = get_scsi_requests,
1644 .put = put_scsi_requests,
1645 };
1646
1647 static bool scsi_sense_state_needed(void *opaque)
1648 {
1649 SCSIDevice *s = opaque;
1650
1651 return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1652 }
1653
1654 static const VMStateDescription vmstate_scsi_sense_state = {
1655 .name = "SCSIDevice/sense",
1656 .version_id = 1,
1657 .minimum_version_id = 1,
1658 .needed = scsi_sense_state_needed,
1659 .fields = (VMStateField[]) {
1660 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1661 SCSI_SENSE_BUF_SIZE_OLD,
1662 SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1663 VMSTATE_END_OF_LIST()
1664 }
1665 };
1666
1667 const VMStateDescription vmstate_scsi_device = {
1668 .name = "SCSIDevice",
1669 .version_id = 1,
1670 .minimum_version_id = 1,
1671 .fields = (VMStateField[]) {
1672 VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1673 VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1674 VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1675 VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1676 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1677 VMSTATE_UINT32(sense_len, SCSIDevice),
1678 {
1679 .name = "requests",
1680 .version_id = 0,
1681 .field_exists = NULL,
1682 .size = 0, /* ouch */
1683 .info = &vmstate_info_scsi_requests,
1684 .flags = VMS_SINGLE,
1685 .offset = 0,
1686 },
1687 VMSTATE_END_OF_LIST()
1688 },
1689 .subsections = (const VMStateDescription*[]) {
1690 &vmstate_scsi_sense_state,
1691 NULL
1692 }
1693 };
1694
1695 static void scsi_device_class_init(ObjectClass *klass, void *data)
1696 {
1697 DeviceClass *k = DEVICE_CLASS(klass);
1698 set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
1699 k->bus_type = TYPE_SCSI_BUS;
1700 k->realize = scsi_qdev_realize;
1701 k->unrealize = scsi_qdev_unrealize;
1702 k->props = scsi_props;
1703 }
1704
1705 static void scsi_dev_instance_init(Object *obj)
1706 {
1707 DeviceState *dev = DEVICE(obj);
1708 SCSIDevice *s = SCSI_DEVICE(dev);
1709
1710 device_add_bootindex_property(obj, &s->conf.bootindex,
1711 "bootindex", NULL,
1712 &s->qdev, NULL);
1713 }
1714
1715 static const TypeInfo scsi_device_type_info = {
1716 .name = TYPE_SCSI_DEVICE,
1717 .parent = TYPE_DEVICE,
1718 .instance_size = sizeof(SCSIDevice),
1719 .abstract = true,
1720 .class_size = sizeof(SCSIDeviceClass),
1721 .class_init = scsi_device_class_init,
1722 .instance_init = scsi_dev_instance_init,
1723 };
1724
1725 static void scsi_register_types(void)
1726 {
1727 type_register_static(&scsi_bus_info);
1728 type_register_static(&scsi_device_type_info);
1729 }
1730
1731 type_init(scsi_register_types)