/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;

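/*
 * Save/load the migration state of an in-flight request: the buffer
 * length and, for writes, the data-out payload.
 */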
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion. */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

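/*
 * Completion callback for commands submitted without a data transfer
 * phase: take the backend's AioContext lock and complete the request.
 */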
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

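/*
 * Fill in the sg_io_hdr for this request and submit it to the host
 * device as an asynchronous SG_IO ioctl.  Returns 0 on submission,
 * -EIO if the AIO request could not be created.
 */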
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

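/*
 * Post-process an INQUIRY reply: record the device's SCSI version and,
 * for disks, clamp the Block Limits transfer lengths to what the host
 * block backend supports and advertise the emulated page 0xb0 in the
 * Supported VPD Pages list.
 */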
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, the first 3 bits of byte 2 are the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins, the device response is at r->buf[0] to
             * r->buf[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

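/*
 * Build an emulated VPD Block Limits (page 0xb0) response in r->buf
 * from the host block backend's maximum transfer size.  Returns the
 * reply length (the full, zero-padded r->buflen).
 */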
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

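/*
 * Completion callback for SG_DXFER_FROM_DEV transfers.  Snoops the
 * reply of a few commands (INQUIRY, READ CAPACITY, MODE SENSE) before
 * handing the data back to the guest.
 */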
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

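/*
 * Completion callback for SG_DXFER_TO_DEV transfers.  Snoops MODE
 * SELECT on tapes to pick up a changed block size.
 */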
static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  The transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

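/*
 * Extract a 64-bit WWN from an NAA or "naa." SCSI name string
 * designation descriptor (VPD page 0x83).  Returns 0 on success,
 * -EINVAL if the descriptor is not in a recognized format.
 */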
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

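/*
 * Issue a synchronous SG_IO device-to-host command.  Used by the
 * realize-time helpers below.  Returns 0 on success, -1 on any
 * transport, device or driver error.
 */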
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response that would tell us whether the
         * device supports VPD Block Limits.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

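/*
 * Read VPD page 0x83 (Device Identification) and record the logical
 * unit and target port WWNs, if any NAA identifiers are present.
 */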
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

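/*
 * Gather INQUIRY-derived state at realize time: WWNs for all device
 * types, plus whether VPD Block Limits must be emulated for disks.
 */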
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

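/*
 * Issue MODE SENSE and return the block length from the first mode
 * parameter block descriptor (used for stream/tape devices).
 * Returns -1 on error.
 */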
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

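/*
 * Realize the passthrough device: validate the backing drive and its
 * error options, check that the host node speaks SG_IO v3, then read
 * the device type, block size and INQUIRY data to seed the state.
 */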
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* Check we are using a driver managing SG_IO (version 3 and after). */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

    /* Make a guess for block devices, we'll fix it when the guest sends
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: they could also send MODE SENSE).
     */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */