/* hw/scsi/scsi-generic.c (QEMU mirror) */
1 /*
2 * Generic SCSI Device support
3 *
4 * Copyright (c) 2007 Bull S.A.S.
5 * Based on code by Paul Brook
6 * Based on code by Fabrice Bellard
7 *
8 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
9 *
10 * This code is licensed under the LGPL.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu-common.h"
17 #include "qemu/error-report.h"
18 #include "hw/scsi/scsi.h"
19 #include "hw/scsi/emulation.h"
20 #include "sysemu/block-backend.h"
21
22 #ifdef __linux__
23
/* Uncomment to enable verbose request tracing via DPRINTF. */
//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
/* Debug tracing; compiled out entirely unless DEBUG_SCSI is defined. */
#define DPRINTF(fmt, ...) \
do { printf("scsi-generic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

/* Unconditional error report to stderr, prefixed with the device name. */
#define BADF(fmt, ...) \
do { fprintf(stderr, "scsi-generic: " fmt , ## __VA_ARGS__); } while (0)
35
36 #include <scsi/sg.h>
37 #include "scsi/constants.h"
38
/* Largest unsigned int; used below as an "effectively infinite" SG_IO timeout. */
#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif
42
/* Per-request state for a passthrough (SG_IO) SCSI command. */
typedef struct SCSIGenericReq {
    SCSIRequest req;        /* embedded generic request; DO_UPCAST relies on it */
    uint8_t *buf;           /* data buffer, g_malloc'ed to cmd.xfer bytes */
    int buflen;             /* allocated size of buf */
    int len;                /* xfer for reads, 0 for writes, -1 once data delivered */
    sg_io_hdr_t io_header;  /* Linux SG_IO control block for this command */
} SCSIGenericReq;
50
51 static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
52 {
53 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
54
55 qemu_put_sbe32s(f, &r->buflen);
56 if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
57 assert(!r->req.sg);
58 qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
59 }
60 }
61
62 static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
63 {
64 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
65
66 qemu_get_sbe32s(f, &r->buflen);
67 if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
68 assert(!r->req.sg);
69 qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
70 }
71 }
72
73 static void scsi_free_request(SCSIRequest *req)
74 {
75 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
76
77 g_free(r->buf);
78 }
79
80 /* Helper function for command completion. */
81 static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
82 {
83 int status;
84 SCSISense sense;
85
86 assert(r->req.aiocb == NULL);
87
88 if (r->req.io_canceled) {
89 scsi_req_cancel_complete(&r->req);
90 goto done;
91 }
92 status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
93 if (status == CHECK_CONDITION) {
94 if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
95 r->req.sense_len = r->io_header.sb_len_wr;
96 } else {
97 scsi_req_build_sense(&r->req, sense);
98 }
99 }
100
101 DPRINTF("Command complete 0x%p tag=0x%x status=%d\n",
102 r, r->req.tag, status);
103
104 scsi_req_complete(&r->req, status);
105 done:
106 scsi_req_unref(&r->req);
107 }
108
109 static void scsi_command_complete(void *opaque, int ret)
110 {
111 SCSIGenericReq *r = (SCSIGenericReq *)opaque;
112 SCSIDevice *s = r->req.dev;
113
114 assert(r->req.aiocb != NULL);
115 r->req.aiocb = NULL;
116
117 aio_context_acquire(blk_get_aio_context(s->conf.blk));
118 scsi_command_complete_noio(r, ret);
119 aio_context_release(blk_get_aio_context(s->conf.blk));
120 }
121
122 static int execute_command(BlockBackend *blk,
123 SCSIGenericReq *r, int direction,
124 BlockCompletionFunc *complete)
125 {
126 r->io_header.interface_id = 'S';
127 r->io_header.dxfer_direction = direction;
128 r->io_header.dxferp = r->buf;
129 r->io_header.dxfer_len = r->buflen;
130 r->io_header.cmdp = r->req.cmd.buf;
131 r->io_header.cmd_len = r->req.cmd.len;
132 r->io_header.mx_sb_len = sizeof(r->req.sense);
133 r->io_header.sbp = r->req.sense;
134 r->io_header.timeout = MAX_UINT;
135 r->io_header.usr_ptr = r;
136 r->io_header.flags |= SG_FLAG_DIRECT_IO;
137
138 r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
139 if (r->req.aiocb == NULL) {
140 return -EIO;
141 }
142
143 return 0;
144 }
145
146 static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
147 {
148 uint8_t page, page_idx;
149
150 /*
151 * EVPD set to zero returns the standard INQUIRY data.
152 *
153 * Check if scsi_version is unset (-1) to avoid re-defining it
154 * each time an INQUIRY with standard data is received.
155 * scsi_version is initialized with -1 in scsi_generic_reset
156 * and scsi_disk_reset, making sure that we'll set the
157 * scsi_version after a reset. If the version field of the
158 * INQUIRY response somehow changes after a guest reboot,
159 * we'll be able to keep track of it.
160 *
161 * On SCSI-2 and older, first 3 bits of byte 2 is the
162 * ANSI-approved version, while on later versions the
163 * whole byte 2 contains the version. Check if we're dealing
164 * with a newer version and, in that case, assign the
165 * whole byte.
166 */
167 if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
168 s->scsi_version = r->buf[2] & 0x07;
169 if (s->scsi_version > 2) {
170 s->scsi_version = r->buf[2];
171 }
172 }
173
174 if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
175 page = r->req.cmd.buf[2];
176 if (page == 0xb0) {
177 uint32_t max_transfer =
178 blk_get_max_transfer(s->conf.blk) / s->blocksize;
179
180 assert(max_transfer);
181 stl_be_p(&r->buf[8], max_transfer);
182 /* Also take care of the opt xfer len. */
183 stl_be_p(&r->buf[12],
184 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
185 } else if (s->needs_vpd_bl_emulation && page == 0x00) {
186 /*
187 * Now we're capable of supplying the VPD Block Limits
188 * response if the hardware can't. Add it in the INQUIRY
189 * Supported VPD pages response in case we are using the
190 * emulation for this device.
191 *
192 * This way, the guest kernel will be aware of the support
193 * and will use it to proper setup the SCSI device.
194 *
195 * VPD page numbers must be sorted, so insert 0xb0 at the
196 * right place with an in-place insert. After the initialization
197 * part of the for loop is executed, the device response is
198 * at r[0] to r[page_idx - 1].
199 */
200 for (page_idx = lduw_be_p(r->buf + 2) + 4;
201 page_idx > 4 && r->buf[page_idx - 1] >= 0xb0;
202 page_idx--) {
203 if (page_idx < r->buflen) {
204 r->buf[page_idx] = r->buf[page_idx - 1];
205 }
206 }
207 r->buf[page_idx] = 0xb0;
208 stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
209 }
210 }
211 }
212
213 static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
214 {
215 int len;
216 uint8_t buf[64];
217
218 SCSIBlockLimits bl = {
219 .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
220 };
221
222 memset(r->buf, 0, r->buflen);
223 stb_p(buf, s->type);
224 stb_p(buf + 1, 0xb0);
225 len = scsi_emulate_block_limits(buf + 4, &bl);
226 assert(len <= sizeof(buf) - 4);
227 stw_be_p(buf + 2, len);
228
229 memcpy(r->buf, buf, MIN(r->buflen, len + 4));
230
231 r->io_header.sb_len_wr = 0;
232
233 /*
234 * We have valid contents in the reply buffer but the
235 * io_header can report a sense error coming from
236 * the hardware in scsi_command_complete_noio. Clean
237 * up the io_header to avoid reporting it.
238 */
239 r->io_header.driver_status = 0;
240 r->io_header.status = 0;
241
242 return r->buflen;
243 }
244
/*
 * AIO completion callback for data-in commands.
 *
 * Computes the number of bytes actually transferred, optionally
 * substitutes an emulated VPD Block Limits reply, snoops READ CAPACITY
 * and MODE SENSE replies to update device state, and hands the data to
 * the HBA via scsi_req_data().  Runs under the blk's AioContext.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Bytes actually transferred = requested length minus residual. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    /* Mark the transfer as delivered; the next scsi_read_data() call
     * will then complete the request instead of re-issuing it. */
    r->len = -1;

    /*
     * Check if this is a VPD Block Limits request that
     * resulted in sense error but would need emulation.
     * In this case, emulate a valid VPD response.
     */
    if (s->needs_vpd_bl_emulation && ret == 0 &&
        (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) &&
        r->req.cmd.buf[0] == INQUIRY &&
        (r->req.cmd.buf[1] & 0x01) &&
        r->req.cmd.buf[2] == 0xb0) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
        if (sense.key == ILLEGAL_REQUEST) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * No need to let scsi_read_complete go on and handle an
             * INQUIRY VPD BL request we created manually.
             */
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            /* Set the write-protect bit in the device-specific byte. */
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    /* Patch/snoop INQUIRY replies (version, block limits, VPD list). */
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
330
331 /* Read more data from scsi device into buffer. */
332 static void scsi_read_data(SCSIRequest *req)
333 {
334 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
335 SCSIDevice *s = r->req.dev;
336 int ret;
337
338 DPRINTF("scsi_read_data tag=0x%x\n", req->tag);
339
340 /* The request is used as the AIO opaque value, so add a ref. */
341 scsi_req_ref(&r->req);
342 if (r->len == -1) {
343 scsi_command_complete_noio(r, 0);
344 return;
345 }
346
347 ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
348 scsi_read_complete);
349 if (ret < 0) {
350 scsi_command_complete_noio(r, ret);
351 }
352 }
353
354 static void scsi_write_complete(void * opaque, int ret)
355 {
356 SCSIGenericReq *r = (SCSIGenericReq *)opaque;
357 SCSIDevice *s = r->req.dev;
358
359 DPRINTF("scsi_write_complete() ret = %d\n", ret);
360
361 assert(r->req.aiocb != NULL);
362 r->req.aiocb = NULL;
363
364 aio_context_acquire(blk_get_aio_context(s->conf.blk));
365
366 if (ret || r->req.io_canceled) {
367 scsi_command_complete_noio(r, ret);
368 goto done;
369 }
370
371 if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
372 s->type == TYPE_TAPE) {
373 s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
374 DPRINTF("block size %d\n", s->blocksize);
375 }
376
377 scsi_command_complete_noio(r, ret);
378
379 done:
380 aio_context_release(blk_get_aio_context(s->conf.blk));
381 }
382
383 /* Write data to a scsi device. Returns nonzero on failure.
384 The transfer may complete asynchronously. */
385 static void scsi_write_data(SCSIRequest *req)
386 {
387 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
388 SCSIDevice *s = r->req.dev;
389 int ret;
390
391 DPRINTF("scsi_write_data tag=0x%x\n", req->tag);
392 if (r->len == 0) {
393 r->len = r->buflen;
394 scsi_req_data(&r->req, r->len);
395 return;
396 }
397
398 /* The request is used as the AIO opaque value, so add a ref. */
399 scsi_req_ref(&r->req);
400 ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
401 if (ret < 0) {
402 scsi_command_complete_noio(r, ret);
403 }
404 }
405
406 /* Return a pointer to the data buffer. */
407 static uint8_t *scsi_get_buf(SCSIRequest *req)
408 {
409 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
410
411 return r->buf;
412 }
413
/* Execute a scsi command. Returns the length of the data expected by the
   command. This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

#ifdef DEBUG_SCSI
    DPRINTF("Command: data=0x%02x", cmd[0]);
    {
        int i;
        for (i = 1; i < r->req.cmd.len; i++) {
            printf(" 0x%02x", cmd[i]);
        }
        printf("\n");
    }
#endif

    /* Commands with no data phase are submitted immediately. */
    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)size the data buffer to the command's transfer length. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Write: r->len == 0 tells scsi_write_data this is the first pass. */
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
466
/*
 * Extract a world-wide name from a SPC designation descriptor @p.
 *
 * Handles binary NAA designators (type 3, 8 bytes) and SCSI name
 * string designators of the form "naa.<16 hex digits>[,...]".
 * Returns 0 and stores the WWN in *p_wwn on success, -EINVAL otherwise.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    switch (p[1] & 0xF) {
    case 3:
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;

    case 8:
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        /* Parse the 16 hex digits following "naa.". */
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;

    default:
        return -EINVAL;
    }
}
499
500 int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
501 uint8_t *buf, uint8_t buf_size)
502 {
503 sg_io_hdr_t io_header;
504 uint8_t sensebuf[8];
505 int ret;
506
507 memset(&io_header, 0, sizeof(io_header));
508 io_header.interface_id = 'S';
509 io_header.dxfer_direction = SG_DXFER_FROM_DEV;
510 io_header.dxfer_len = buf_size;
511 io_header.dxferp = buf;
512 io_header.cmdp = cmd;
513 io_header.cmd_len = cmd_size;
514 io_header.mx_sb_len = sizeof(sensebuf);
515 io_header.sbp = sensebuf;
516 io_header.timeout = 6000; /* XXX */
517
518 ret = blk_ioctl(blk, SG_IO, &io_header);
519 if (ret < 0 || io_header.driver_status || io_header.host_status) {
520 return -1;
521 }
522 return 0;
523 }
524
525 /*
526 * Executes an INQUIRY request with EVPD set to retrieve the
527 * available VPD pages of the device. If the device does
528 * not support the Block Limits page (page 0xb0), set
529 * the needs_vpd_bl_emulation flag for future use.
530 */
531 static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
532 {
533 uint8_t cmd[6];
534 uint8_t buf[250];
535 uint8_t page_len;
536 int ret, i;
537
538 memset(cmd, 0, sizeof(cmd));
539 memset(buf, 0, sizeof(buf));
540 cmd[0] = INQUIRY;
541 cmd[1] = 1;
542 cmd[2] = 0x00;
543 cmd[4] = sizeof(buf);
544
545 ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
546 buf, sizeof(buf));
547 if (ret < 0) {
548 /*
549 * Do not assume anything if we can't retrieve the
550 * INQUIRY response to assert the VPD Block Limits
551 * support.
552 */
553 s->needs_vpd_bl_emulation = false;
554 return;
555 }
556
557 page_len = buf[3];
558 for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
559 if (buf[i] == 0xb0) {
560 s->needs_vpd_bl_emulation = false;
561 return;
562 }
563 }
564 s->needs_vpd_bl_emulation = true;
565 }
566
567 static void scsi_generic_read_device_identification(SCSIDevice *s)
568 {
569 uint8_t cmd[6];
570 uint8_t buf[250];
571 int ret;
572 int i, len;
573
574 memset(cmd, 0, sizeof(cmd));
575 memset(buf, 0, sizeof(buf));
576 cmd[0] = INQUIRY;
577 cmd[1] = 1;
578 cmd[2] = 0x83;
579 cmd[4] = sizeof(buf);
580
581 ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
582 buf, sizeof(buf));
583 if (ret < 0) {
584 return;
585 }
586
587 len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
588 for (i = 0; i + 3 <= len; ) {
589 const uint8_t *p = &buf[i + 4];
590 uint64_t wwn;
591
592 if (i + (p[3] + 4) > len) {
593 break;
594 }
595
596 if ((p[1] & 0x10) == 0) {
597 /* Associated with the logical unit */
598 if (read_naa_id(p, &wwn) == 0) {
599 s->wwn = wwn;
600 }
601 } else if ((p[1] & 0x10) == 0x10) {
602 /* Associated with the target port */
603 if (read_naa_id(p, &wwn) == 0) {
604 s->port_wwn = wwn;
605 }
606 }
607
608 i += p[3] + 4;
609 }
610 }
611
612 void scsi_generic_read_device_inquiry(SCSIDevice *s)
613 {
614 scsi_generic_read_device_identification(s);
615 if (s->type == TYPE_DISK) {
616 scsi_generic_set_vpd_bl_emulation(s);
617 } else {
618 s->needs_vpd_bl_emulation = false;
619 }
620 }
621
622 static int get_stream_blocksize(BlockBackend *blk)
623 {
624 uint8_t cmd[6];
625 uint8_t buf[12];
626 int ret;
627
628 memset(cmd, 0, sizeof(cmd));
629 memset(buf, 0, sizeof(buf));
630 cmd[0] = MODE_SENSE;
631 cmd[4] = sizeof(buf);
632
633 ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
634 if (ret < 0) {
635 return -1;
636 }
637
638 return (buf[9] << 16) | (buf[10] << 8) | buf[11];
639 }
640
/*
 * Device reset callback: cancel all in-flight requests with RESET sense
 * and reset scsi_version to default_scsi_version (-1 for scsi-generic,
 * see scsi_generic_realize) so scsi_handle_inquiry_reply re-learns the
 * version from the next standard INQUIRY reply.
 */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
648
/*
 * Realize callback: validate the backing drive and its error options,
 * verify the host node speaks SG_IO v3+, query the device type via
 * SG_GET_SCSI_ID, pick an initial block size, and run the initial
 * INQUIRY-based probing.  On any failure, sets *errp and returns.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Passthrough cannot retry/ignore errors, so only the default
     * werror/rerror policies are supported. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    DPRINTF("device type %d\n", s->type);

    switch (s->type) {
    case TYPE_TAPE:
        /* Ask the tape for its current block size via MODE SENSE. */
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends.
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    DPRINTF("block size %d\n", s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}
725
/* Request ops shared with scsi-block for SG_IO passthrough requests. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};
736
737 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
738 uint8_t *buf, void *hba_private)
739 {
740 return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
741 }
742
/* qdev properties: the backing host drive and whether it may be shared
 * writable between VMs. */
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};
748
/* parse_cdb hook: defer CDB parsing to the generic bus-level parser. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}
754
755 static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
756 {
757 DeviceClass *dc = DEVICE_CLASS(klass);
758 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
759
760 sc->realize = scsi_generic_realize;
761 sc->alloc_req = scsi_new_request;
762 sc->parse_cdb = scsi_generic_parse_cdb;
763 dc->fw_name = "disk";
764 dc->desc = "pass through generic scsi device (/dev/sg*)";
765 dc->reset = scsi_generic_reset;
766 dc->props = scsi_generic_properties;
767 dc->vmsd = &vmstate_scsi_device;
768 }
769
/* QOM type registration data for the "scsi-generic" device. */
static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};
776
/* Register the scsi-generic type with QOM at module-init time. */
static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)
783
784 #endif /* __linux__ */