2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
44 #include "qom/object.h"
50 #define SCSI_WRITE_SAME_MAX (512 * KiB)
51 #define SCSI_DMA_BUF_SIZE (128 * KiB)
52 #define SCSI_MAX_INQUIRY_LEN 256
53 #define SCSI_MAX_MODE_LEN 256
55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
56 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
57 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
59 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
61 OBJECT_DECLARE_TYPE(SCSIDiskState
, SCSIDiskClass
, SCSI_DISK_BASE
)
63 struct SCSIDiskClass
{
64 SCSIDeviceClass parent_class
;
66 DMAIOFunc
*dma_writev
;
67 bool (*need_fua_emulation
)(SCSICommand
*cmd
);
68 void (*update_sense
)(SCSIRequest
*r
);
71 typedef struct SCSIDiskReq
{
73 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
75 uint32_t sector_count
;
78 bool need_fua_emulation
;
84 #define SCSI_DISK_F_REMOVABLE 0
85 #define SCSI_DISK_F_DPOFUA 1
86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
88 struct SCSIDiskState
{
95 uint64_t max_unmap_size
;
107 * 0x0000 - rotation rate not reported
108 * 0x0001 - non-rotating medium (SSD)
109 * 0x0002-0x0400 - reserved
110 * 0x0401-0xffe - rotations per minute
113 uint16_t rotation_rate
;
116 static void scsi_free_request(SCSIRequest
*req
)
118 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
120 qemu_vfree(r
->iov
.iov_base
);
123 /* Helper function for command completion with sense. */
124 static void scsi_check_condition(SCSIDiskReq
*r
, SCSISense sense
)
126 trace_scsi_disk_check_condition(r
->req
.tag
, sense
.key
, sense
.asc
,
128 scsi_req_build_sense(&r
->req
, sense
);
129 scsi_req_complete(&r
->req
, CHECK_CONDITION
);
132 static void scsi_init_iovec(SCSIDiskReq
*r
, size_t size
)
134 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
136 if (!r
->iov
.iov_base
) {
138 r
->iov
.iov_base
= blk_blockalign(s
->qdev
.conf
.blk
, r
->buflen
);
140 r
->iov
.iov_len
= MIN(r
->sector_count
* BDRV_SECTOR_SIZE
, r
->buflen
);
141 qemu_iovec_init_external(&r
->qiov
, &r
->iov
, 1);
144 static void scsi_disk_save_request(QEMUFile
*f
, SCSIRequest
*req
)
146 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
148 qemu_put_be64s(f
, &r
->sector
);
149 qemu_put_be32s(f
, &r
->sector_count
);
150 qemu_put_be32s(f
, &r
->buflen
);
152 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
153 qemu_put_buffer(f
, r
->iov
.iov_base
, r
->iov
.iov_len
);
154 } else if (!req
->retry
) {
155 uint32_t len
= r
->iov
.iov_len
;
156 qemu_put_be32s(f
, &len
);
157 qemu_put_buffer(f
, r
->iov
.iov_base
, r
->iov
.iov_len
);
162 static void scsi_disk_load_request(QEMUFile
*f
, SCSIRequest
*req
)
164 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
166 qemu_get_be64s(f
, &r
->sector
);
167 qemu_get_be32s(f
, &r
->sector_count
);
168 qemu_get_be32s(f
, &r
->buflen
);
170 scsi_init_iovec(r
, r
->buflen
);
171 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
172 qemu_get_buffer(f
, r
->iov
.iov_base
, r
->iov
.iov_len
);
173 } else if (!r
->req
.retry
) {
175 qemu_get_be32s(f
, &len
);
176 r
->iov
.iov_len
= len
;
177 assert(r
->iov
.iov_len
<= r
->buflen
);
178 qemu_get_buffer(f
, r
->iov
.iov_base
, r
->iov
.iov_len
);
182 qemu_iovec_init_external(&r
->qiov
, &r
->iov
, 1);
186 * scsi_handle_rw_error has two return values. False means that the error
187 * must be ignored, true means that the error has been processed and the
188 * caller should not do anything else for this request. Note that
189 * scsi_handle_rw_error always manages its reference counts, independent
190 * of the return value.
192 static bool scsi_handle_rw_error(SCSIDiskReq
*r
, int ret
, bool acct_failed
)
194 bool is_read
= (r
->req
.cmd
.mode
== SCSI_XFER_FROM_DEV
);
195 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
196 SCSIDiskClass
*sdc
= (SCSIDiskClass
*) object_get_class(OBJECT(s
));
197 SCSISense sense
= SENSE_CODE(NO_SENSE
);
199 bool req_has_sense
= false;
200 BlockErrorAction action
;
204 status
= scsi_sense_from_errno(-ret
, &sense
);
207 /* A passthrough command has completed with nonzero status. */
209 if (status
== CHECK_CONDITION
) {
210 req_has_sense
= true;
211 error
= scsi_sense_buf_to_errno(r
->req
.sense
, sizeof(r
->req
.sense
));
218 * Check whether the error has to be handled by the guest or should
219 * rather follow the rerror=/werror= settings. Guest-handled errors
220 * are usually retried immediately, so do not post them to QMP and
221 * do not account them as failed I/O.
224 scsi_sense_buf_is_guest_recoverable(r
->req
.sense
, sizeof(r
->req
.sense
))) {
225 action
= BLOCK_ERROR_ACTION_REPORT
;
228 action
= blk_get_error_action(s
->qdev
.conf
.blk
, is_read
, error
);
229 blk_error_action(s
->qdev
.conf
.blk
, action
, is_read
, error
);
233 case BLOCK_ERROR_ACTION_REPORT
:
235 block_acct_failed(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
238 sdc
->update_sense(&r
->req
);
239 } else if (status
== CHECK_CONDITION
) {
240 scsi_req_build_sense(&r
->req
, sense
);
242 scsi_req_complete(&r
->req
, status
);
245 case BLOCK_ERROR_ACTION_IGNORE
:
248 case BLOCK_ERROR_ACTION_STOP
:
249 scsi_req_retry(&r
->req
);
253 g_assert_not_reached();
257 static bool scsi_disk_req_check_error(SCSIDiskReq
*r
, int ret
, bool acct_failed
)
259 if (r
->req
.io_canceled
) {
260 scsi_req_cancel_complete(&r
->req
);
265 return scsi_handle_rw_error(r
, ret
, acct_failed
);
271 static void scsi_aio_complete(void *opaque
, int ret
)
273 SCSIDiskReq
*r
= (SCSIDiskReq
*)opaque
;
274 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
276 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
278 assert(r
->req
.aiocb
!= NULL
);
281 if (scsi_disk_req_check_error(r
, ret
, true)) {
285 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
286 scsi_req_complete(&r
->req
, GOOD
);
289 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
290 scsi_req_unref(&r
->req
);
293 static bool scsi_is_cmd_fua(SCSICommand
*cmd
)
295 switch (cmd
->buf
[0]) {
302 return (cmd
->buf
[1] & 8) != 0;
307 case WRITE_VERIFY_10
:
308 case WRITE_VERIFY_12
:
309 case WRITE_VERIFY_16
:
319 static void scsi_write_do_fua(SCSIDiskReq
*r
)
321 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
323 assert(r
->req
.aiocb
== NULL
);
324 assert(!r
->req
.io_canceled
);
326 if (r
->need_fua_emulation
) {
327 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
329 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_aio_complete
, r
);
333 scsi_req_complete(&r
->req
, GOOD
);
334 scsi_req_unref(&r
->req
);
337 static void scsi_dma_complete_noio(SCSIDiskReq
*r
, int ret
)
339 assert(r
->req
.aiocb
== NULL
);
340 if (scsi_disk_req_check_error(r
, ret
, false)) {
344 r
->sector
+= r
->sector_count
;
346 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
347 scsi_write_do_fua(r
);
350 scsi_req_complete(&r
->req
, GOOD
);
354 scsi_req_unref(&r
->req
);
357 static void scsi_dma_complete(void *opaque
, int ret
)
359 SCSIDiskReq
*r
= (SCSIDiskReq
*)opaque
;
360 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
362 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
364 assert(r
->req
.aiocb
!= NULL
);
368 block_acct_failed(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
370 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
372 scsi_dma_complete_noio(r
, ret
);
373 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
376 static void scsi_read_complete_noio(SCSIDiskReq
*r
, int ret
)
380 assert(r
->req
.aiocb
== NULL
);
381 if (scsi_disk_req_check_error(r
, ret
, false)) {
385 n
= r
->qiov
.size
/ BDRV_SECTOR_SIZE
;
387 r
->sector_count
-= n
;
388 scsi_req_data(&r
->req
, r
->qiov
.size
);
391 scsi_req_unref(&r
->req
);
394 static void scsi_read_complete(void *opaque
, int ret
)
396 SCSIDiskReq
*r
= (SCSIDiskReq
*)opaque
;
397 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
399 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
401 assert(r
->req
.aiocb
!= NULL
);
405 block_acct_failed(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
407 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
408 trace_scsi_disk_read_complete(r
->req
.tag
, r
->qiov
.size
);
410 scsi_read_complete_noio(r
, ret
);
411 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
414 /* Actually issue a read to the block device. */
415 static void scsi_do_read(SCSIDiskReq
*r
, int ret
)
417 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
418 SCSIDiskClass
*sdc
= (SCSIDiskClass
*) object_get_class(OBJECT(s
));
420 assert (r
->req
.aiocb
== NULL
);
421 if (scsi_disk_req_check_error(r
, ret
, false)) {
425 /* The request is used as the AIO opaque value, so add a ref. */
426 scsi_req_ref(&r
->req
);
429 dma_acct_start(s
->qdev
.conf
.blk
, &r
->acct
, r
->req
.sg
, BLOCK_ACCT_READ
);
430 r
->req
.residual
-= r
->req
.sg
->size
;
431 r
->req
.aiocb
= dma_blk_io(blk_get_aio_context(s
->qdev
.conf
.blk
),
432 r
->req
.sg
, r
->sector
<< BDRV_SECTOR_BITS
,
434 sdc
->dma_readv
, r
, scsi_dma_complete
, r
,
435 DMA_DIRECTION_FROM_DEVICE
);
437 scsi_init_iovec(r
, SCSI_DMA_BUF_SIZE
);
438 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
439 r
->qiov
.size
, BLOCK_ACCT_READ
);
440 r
->req
.aiocb
= sdc
->dma_readv(r
->sector
<< BDRV_SECTOR_BITS
, &r
->qiov
,
441 scsi_read_complete
, r
, r
);
445 scsi_req_unref(&r
->req
);
448 static void scsi_do_read_cb(void *opaque
, int ret
)
450 SCSIDiskReq
*r
= (SCSIDiskReq
*)opaque
;
451 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
453 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
455 assert (r
->req
.aiocb
!= NULL
);
459 block_acct_failed(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
461 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
463 scsi_do_read(opaque
, ret
);
464 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
467 /* Read more data from scsi device into buffer. */
468 static void scsi_read_data(SCSIRequest
*req
)
470 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
471 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
474 trace_scsi_disk_read_data_count(r
->sector_count
);
475 if (r
->sector_count
== 0) {
476 /* This also clears the sense buffer for REQUEST SENSE. */
477 scsi_req_complete(&r
->req
, GOOD
);
481 /* No data transfer may already be in progress */
482 assert(r
->req
.aiocb
== NULL
);
484 /* The request is used as the AIO opaque value, so add a ref. */
485 scsi_req_ref(&r
->req
);
486 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
487 trace_scsi_disk_read_data_invalid();
488 scsi_read_complete_noio(r
, -EINVAL
);
492 if (!blk_is_available(req
->dev
->conf
.blk
)) {
493 scsi_read_complete_noio(r
, -ENOMEDIUM
);
499 if (first
&& r
->need_fua_emulation
) {
500 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
502 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_do_read_cb
, r
);
508 static void scsi_write_complete_noio(SCSIDiskReq
*r
, int ret
)
512 assert (r
->req
.aiocb
== NULL
);
513 if (scsi_disk_req_check_error(r
, ret
, false)) {
517 n
= r
->qiov
.size
/ BDRV_SECTOR_SIZE
;
519 r
->sector_count
-= n
;
520 if (r
->sector_count
== 0) {
521 scsi_write_do_fua(r
);
524 scsi_init_iovec(r
, SCSI_DMA_BUF_SIZE
);
525 trace_scsi_disk_write_complete_noio(r
->req
.tag
, r
->qiov
.size
);
526 scsi_req_data(&r
->req
, r
->qiov
.size
);
530 scsi_req_unref(&r
->req
);
533 static void scsi_write_complete(void * opaque
, int ret
)
535 SCSIDiskReq
*r
= (SCSIDiskReq
*)opaque
;
536 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
538 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
540 assert (r
->req
.aiocb
!= NULL
);
544 block_acct_failed(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
546 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
548 scsi_write_complete_noio(r
, ret
);
549 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
552 static void scsi_write_data(SCSIRequest
*req
)
554 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
555 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
556 SCSIDiskClass
*sdc
= (SCSIDiskClass
*) object_get_class(OBJECT(s
));
558 /* No data transfer may already be in progress */
559 assert(r
->req
.aiocb
== NULL
);
561 /* The request is used as the AIO opaque value, so add a ref. */
562 scsi_req_ref(&r
->req
);
563 if (r
->req
.cmd
.mode
!= SCSI_XFER_TO_DEV
) {
564 trace_scsi_disk_write_data_invalid();
565 scsi_write_complete_noio(r
, -EINVAL
);
569 if (!r
->req
.sg
&& !r
->qiov
.size
) {
570 /* Called for the first time. Ask the driver to send us more data. */
572 scsi_write_complete_noio(r
, 0);
575 if (!blk_is_available(req
->dev
->conf
.blk
)) {
576 scsi_write_complete_noio(r
, -ENOMEDIUM
);
580 if (r
->req
.cmd
.buf
[0] == VERIFY_10
|| r
->req
.cmd
.buf
[0] == VERIFY_12
||
581 r
->req
.cmd
.buf
[0] == VERIFY_16
) {
583 scsi_dma_complete_noio(r
, 0);
585 scsi_write_complete_noio(r
, 0);
591 dma_acct_start(s
->qdev
.conf
.blk
, &r
->acct
, r
->req
.sg
, BLOCK_ACCT_WRITE
);
592 r
->req
.residual
-= r
->req
.sg
->size
;
593 r
->req
.aiocb
= dma_blk_io(blk_get_aio_context(s
->qdev
.conf
.blk
),
594 r
->req
.sg
, r
->sector
<< BDRV_SECTOR_BITS
,
596 sdc
->dma_writev
, r
, scsi_dma_complete
, r
,
597 DMA_DIRECTION_TO_DEVICE
);
599 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
600 r
->qiov
.size
, BLOCK_ACCT_WRITE
);
601 r
->req
.aiocb
= sdc
->dma_writev(r
->sector
<< BDRV_SECTOR_BITS
, &r
->qiov
,
602 scsi_write_complete
, r
, r
);
606 /* Return a pointer to the data buffer. */
607 static uint8_t *scsi_get_buf(SCSIRequest
*req
)
609 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
611 return (uint8_t *)r
->iov
.iov_base
;
614 static int scsi_disk_emulate_vpd_page(SCSIRequest
*req
, uint8_t *outbuf
)
616 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
617 uint8_t page_code
= req
->cmd
.buf
[2];
618 int start
, buflen
= 0;
620 outbuf
[buflen
++] = s
->qdev
.type
& 0x1f;
621 outbuf
[buflen
++] = page_code
;
622 outbuf
[buflen
++] = 0x00;
623 outbuf
[buflen
++] = 0x00;
627 case 0x00: /* Supported page codes, mandatory */
629 trace_scsi_disk_emulate_vpd_page_00(req
->cmd
.xfer
);
630 outbuf
[buflen
++] = 0x00; /* list of supported pages (this page) */
632 outbuf
[buflen
++] = 0x80; /* unit serial number */
634 outbuf
[buflen
++] = 0x83; /* device identification */
635 if (s
->qdev
.type
== TYPE_DISK
) {
636 outbuf
[buflen
++] = 0xb0; /* block limits */
637 outbuf
[buflen
++] = 0xb1; /* block device characteristics */
638 outbuf
[buflen
++] = 0xb2; /* thin provisioning */
642 case 0x80: /* Device serial number, optional */
647 trace_scsi_disk_emulate_vpd_page_80_not_supported();
651 l
= strlen(s
->serial
);
656 trace_scsi_disk_emulate_vpd_page_80(req
->cmd
.xfer
);
657 memcpy(outbuf
+ buflen
, s
->serial
, l
);
662 case 0x83: /* Device identification page, mandatory */
664 int id_len
= s
->device_id
? MIN(strlen(s
->device_id
), 255 - 8) : 0;
666 trace_scsi_disk_emulate_vpd_page_83(req
->cmd
.xfer
);
669 outbuf
[buflen
++] = 0x2; /* ASCII */
670 outbuf
[buflen
++] = 0; /* not officially assigned */
671 outbuf
[buflen
++] = 0; /* reserved */
672 outbuf
[buflen
++] = id_len
; /* length of data following */
673 memcpy(outbuf
+ buflen
, s
->device_id
, id_len
);
678 outbuf
[buflen
++] = 0x1; /* Binary */
679 outbuf
[buflen
++] = 0x3; /* NAA */
680 outbuf
[buflen
++] = 0; /* reserved */
681 outbuf
[buflen
++] = 8;
682 stq_be_p(&outbuf
[buflen
], s
->qdev
.wwn
);
686 if (s
->qdev
.port_wwn
) {
687 outbuf
[buflen
++] = 0x61; /* SAS / Binary */
688 outbuf
[buflen
++] = 0x93; /* PIV / Target port / NAA */
689 outbuf
[buflen
++] = 0; /* reserved */
690 outbuf
[buflen
++] = 8;
691 stq_be_p(&outbuf
[buflen
], s
->qdev
.port_wwn
);
696 outbuf
[buflen
++] = 0x61; /* SAS / Binary */
698 /* PIV/Target port/relative target port */
699 outbuf
[buflen
++] = 0x94;
701 outbuf
[buflen
++] = 0; /* reserved */
702 outbuf
[buflen
++] = 4;
703 stw_be_p(&outbuf
[buflen
+ 2], s
->port_index
);
708 case 0xb0: /* block limits */
710 SCSIBlockLimits bl
= {};
712 if (s
->qdev
.type
== TYPE_ROM
) {
713 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
718 s
->qdev
.conf
.discard_granularity
/ s
->qdev
.blocksize
;
720 s
->qdev
.conf
.min_io_size
/ s
->qdev
.blocksize
;
722 s
->qdev
.conf
.opt_io_size
/ s
->qdev
.blocksize
;
723 bl
.max_unmap_sectors
=
724 s
->max_unmap_size
/ s
->qdev
.blocksize
;
726 s
->max_io_size
/ s
->qdev
.blocksize
;
727 /* 255 descriptors fit in 4 KiB with an 8-byte header */
728 bl
.max_unmap_descr
= 255;
730 if (s
->qdev
.type
== TYPE_DISK
) {
731 int max_transfer_blk
= blk_get_max_transfer(s
->qdev
.conf
.blk
);
732 int max_io_sectors_blk
=
733 max_transfer_blk
/ s
->qdev
.blocksize
;
736 MIN_NON_ZERO(max_io_sectors_blk
, bl
.max_io_sectors
);
738 buflen
+= scsi_emulate_block_limits(outbuf
+ buflen
, &bl
);
741 case 0xb1: /* block device characteristics */
744 outbuf
[4] = (s
->rotation_rate
>> 8) & 0xff;
745 outbuf
[5] = s
->rotation_rate
& 0xff;
746 outbuf
[6] = 0; /* PRODUCT TYPE */
747 outbuf
[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
748 outbuf
[8] = 0; /* VBULS */
751 case 0xb2: /* thin provisioning */
755 outbuf
[5] = 0xe0; /* unmap & write_same 10/16 all supported */
756 outbuf
[6] = s
->qdev
.conf
.discard_granularity
? 2 : 1;
764 assert(buflen
- start
<= 255);
765 outbuf
[start
- 1] = buflen
- start
;
769 static int scsi_disk_emulate_inquiry(SCSIRequest
*req
, uint8_t *outbuf
)
771 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
774 if (req
->cmd
.buf
[1] & 0x1) {
775 /* Vital product data */
776 return scsi_disk_emulate_vpd_page(req
, outbuf
);
779 /* Standard INQUIRY data */
780 if (req
->cmd
.buf
[2] != 0) {
785 buflen
= req
->cmd
.xfer
;
786 if (buflen
> SCSI_MAX_INQUIRY_LEN
) {
787 buflen
= SCSI_MAX_INQUIRY_LEN
;
790 outbuf
[0] = s
->qdev
.type
& 0x1f;
791 outbuf
[1] = (s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) ? 0x80 : 0;
793 strpadcpy((char *) &outbuf
[16], 16, s
->product
, ' ');
794 strpadcpy((char *) &outbuf
[8], 8, s
->vendor
, ' ');
796 memset(&outbuf
[32], 0, 4);
797 memcpy(&outbuf
[32], s
->version
, MIN(4, strlen(s
->version
)));
799 * We claim conformance to SPC-3, which is required for guests
800 * to ask for modern features like READ CAPACITY(16) or the
801 * block characteristics VPD page by default. Not all of SPC-3
802 * is actually implemented, but we're good enough.
804 outbuf
[2] = s
->qdev
.default_scsi_version
;
805 outbuf
[3] = 2 | 0x10; /* Format 2, HiSup */
808 outbuf
[4] = buflen
- 5; /* Additional Length = (Len - 1) - 4 */
810 /* If the allocation length of CDB is too small,
811 the additional length is not adjusted */
815 /* Sync data transfer and TCQ. */
816 outbuf
[7] = 0x10 | (req
->bus
->info
->tcq
? 0x02 : 0);
820 static inline bool media_is_dvd(SCSIDiskState
*s
)
823 if (s
->qdev
.type
!= TYPE_ROM
) {
826 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
829 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
830 return nb_sectors
> CD_MAX_SECTORS
;
833 static inline bool media_is_cd(SCSIDiskState
*s
)
836 if (s
->qdev
.type
!= TYPE_ROM
) {
839 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
842 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
843 return nb_sectors
<= CD_MAX_SECTORS
;
846 static int scsi_read_disc_information(SCSIDiskState
*s
, SCSIDiskReq
*r
,
849 uint8_t type
= r
->req
.cmd
.buf
[1] & 7;
851 if (s
->qdev
.type
!= TYPE_ROM
) {
855 /* Types 1/2 are only defined for Blu-Ray. */
857 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
861 memset(outbuf
, 0, 34);
863 outbuf
[2] = 0xe; /* last session complete, disc finalized */
864 outbuf
[3] = 1; /* first track on disc */
865 outbuf
[4] = 1; /* # of sessions */
866 outbuf
[5] = 1; /* first track of last session */
867 outbuf
[6] = 1; /* last track of last session */
868 outbuf
[7] = 0x20; /* unrestricted use */
869 outbuf
[8] = 0x00; /* CD-ROM or DVD-ROM */
870 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
871 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
872 /* 24-31: disc bar code */
873 /* 32: disc application code */
874 /* 33: number of OPC tables */
879 static int scsi_read_dvd_structure(SCSIDiskState
*s
, SCSIDiskReq
*r
,
882 static const int rds_caps_size
[5] = {
889 uint8_t media
= r
->req
.cmd
.buf
[1];
890 uint8_t layer
= r
->req
.cmd
.buf
[6];
891 uint8_t format
= r
->req
.cmd
.buf
[7];
894 if (s
->qdev
.type
!= TYPE_ROM
) {
898 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
902 if (format
!= 0xff) {
903 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
904 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
907 if (media_is_cd(s
)) {
908 scsi_check_condition(r
, SENSE_CODE(INCOMPATIBLE_FORMAT
));
911 if (format
>= ARRAY_SIZE(rds_caps_size
)) {
914 size
= rds_caps_size
[format
];
915 memset(outbuf
, 0, size
);
920 /* Physical format information */
925 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
927 outbuf
[4] = 1; /* DVD-ROM, part version 1 */
928 outbuf
[5] = 0xf; /* 120mm disc, minimum rate unspecified */
929 outbuf
[6] = 1; /* one layer, read-only (per MMC-2 spec) */
930 outbuf
[7] = 0; /* default densities */
932 stl_be_p(&outbuf
[12], (nb_sectors
>> 2) - 1); /* end sector */
933 stl_be_p(&outbuf
[16], (nb_sectors
>> 2) - 1); /* l0 end sector */
937 case 0x01: /* DVD copyright information, all zeros */
940 case 0x03: /* BCA information - invalid field for no BCA info */
943 case 0x04: /* DVD disc manufacturing information, all zeros */
946 case 0xff: { /* List capabilities */
949 for (i
= 0; i
< ARRAY_SIZE(rds_caps_size
); i
++) {
950 if (!rds_caps_size
[i
]) {
954 outbuf
[size
+ 1] = 0x40; /* Not writable, readable */
955 stw_be_p(&outbuf
[size
+ 2], rds_caps_size
[i
]);
965 /* Size of buffer, not including 2 byte size field */
966 stw_be_p(outbuf
, size
- 2);
973 static int scsi_event_status_media(SCSIDiskState
*s
, uint8_t *outbuf
)
975 uint8_t event_code
, media_status
;
979 media_status
= MS_TRAY_OPEN
;
980 } else if (blk_is_inserted(s
->qdev
.conf
.blk
)) {
981 media_status
= MS_MEDIA_PRESENT
;
984 /* Event notification descriptor */
985 event_code
= MEC_NO_CHANGE
;
986 if (media_status
!= MS_TRAY_OPEN
) {
987 if (s
->media_event
) {
988 event_code
= MEC_NEW_MEDIA
;
989 s
->media_event
= false;
990 } else if (s
->eject_request
) {
991 event_code
= MEC_EJECT_REQUESTED
;
992 s
->eject_request
= false;
996 outbuf
[0] = event_code
;
997 outbuf
[1] = media_status
;
999 /* These fields are reserved, just clear them. */
1005 static int scsi_get_event_status_notification(SCSIDiskState
*s
, SCSIDiskReq
*r
,
1009 uint8_t *buf
= r
->req
.cmd
.buf
;
1010 uint8_t notification_class_request
= buf
[4];
1011 if (s
->qdev
.type
!= TYPE_ROM
) {
1014 if ((buf
[1] & 1) == 0) {
1020 outbuf
[0] = outbuf
[1] = 0;
1021 outbuf
[3] = 1 << GESN_MEDIA
; /* supported events */
1022 if (notification_class_request
& (1 << GESN_MEDIA
)) {
1023 outbuf
[2] = GESN_MEDIA
;
1024 size
+= scsi_event_status_media(s
, &outbuf
[size
]);
1028 stw_be_p(outbuf
, size
- 4);
1032 static int scsi_get_configuration(SCSIDiskState
*s
, uint8_t *outbuf
)
1036 if (s
->qdev
.type
!= TYPE_ROM
) {
1040 if (media_is_dvd(s
)) {
1041 current
= MMC_PROFILE_DVD_ROM
;
1042 } else if (media_is_cd(s
)) {
1043 current
= MMC_PROFILE_CD_ROM
;
1045 current
= MMC_PROFILE_NONE
;
1048 memset(outbuf
, 0, 40);
1049 stl_be_p(&outbuf
[0], 36); /* Bytes after the data length field */
1050 stw_be_p(&outbuf
[6], current
);
1051 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1052 outbuf
[10] = 0x03; /* persistent, current */
1053 outbuf
[11] = 8; /* two profiles */
1054 stw_be_p(&outbuf
[12], MMC_PROFILE_DVD_ROM
);
1055 outbuf
[14] = (current
== MMC_PROFILE_DVD_ROM
);
1056 stw_be_p(&outbuf
[16], MMC_PROFILE_CD_ROM
);
1057 outbuf
[18] = (current
== MMC_PROFILE_CD_ROM
);
1058 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1059 stw_be_p(&outbuf
[20], 1);
1060 outbuf
[22] = 0x08 | 0x03; /* version 2, persistent, current */
1062 stl_be_p(&outbuf
[24], 1); /* SCSI */
1063 outbuf
[28] = 1; /* DBE = 1, mandatory */
1064 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1065 stw_be_p(&outbuf
[32], 3);
1066 outbuf
[34] = 0x08 | 0x03; /* version 2, persistent, current */
1068 outbuf
[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1069 /* TODO: Random readable, CD read, DVD read, drive serial number,
1074 static int scsi_emulate_mechanism_status(SCSIDiskState
*s
, uint8_t *outbuf
)
1076 if (s
->qdev
.type
!= TYPE_ROM
) {
1079 memset(outbuf
, 0, 8);
1080 outbuf
[5] = 1; /* CD-ROM */
1084 static int mode_sense_page(SCSIDiskState
*s
, int page
, uint8_t **p_outbuf
,
1087 static const int mode_sense_valid
[0x3f] = {
1088 [MODE_PAGE_VENDOR_SPECIFIC
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1089 [MODE_PAGE_HD_GEOMETRY
] = (1 << TYPE_DISK
),
1090 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY
] = (1 << TYPE_DISK
),
1091 [MODE_PAGE_CACHING
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1092 [MODE_PAGE_R_W_ERROR
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1093 [MODE_PAGE_AUDIO_CTL
] = (1 << TYPE_ROM
),
1094 [MODE_PAGE_CAPABILITIES
] = (1 << TYPE_ROM
),
1095 [MODE_PAGE_APPLE_VENDOR
] = (1 << TYPE_ROM
),
1098 uint8_t *p
= *p_outbuf
+ 2;
1101 assert(page
< ARRAY_SIZE(mode_sense_valid
));
1102 if ((mode_sense_valid
[page
] & (1 << s
->qdev
.type
)) == 0) {
1107 * If Changeable Values are requested, a mask denoting those mode parameters
1108 * that are changeable shall be returned. As we currently don't support
1109 * parameter changes via MODE_SELECT all bits are returned set to zero.
1110 * The buffer was already memset to zero by the caller of this function.
1112 * The offsets here are off by two compared to the descriptions in the
1113 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1114 * but it is done so that offsets are consistent within our implementation
1115 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1116 * 2-byte and 4-byte headers.
1119 case MODE_PAGE_HD_GEOMETRY
:
1121 if (page_control
== 1) { /* Changeable Values */
1124 /* if a geometry hint is available, use it */
1125 p
[0] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1126 p
[1] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1127 p
[2] = s
->qdev
.conf
.cyls
& 0xff;
1128 p
[3] = s
->qdev
.conf
.heads
& 0xff;
1129 /* Write precomp start cylinder, disabled */
1130 p
[4] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1131 p
[5] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1132 p
[6] = s
->qdev
.conf
.cyls
& 0xff;
1133 /* Reduced current start cylinder, disabled */
1134 p
[7] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1135 p
[8] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1136 p
[9] = s
->qdev
.conf
.cyls
& 0xff;
1137 /* Device step rate [ns], 200ns */
1140 /* Landing zone cylinder */
1144 /* Medium rotation rate [rpm], 5400 rpm */
1145 p
[18] = (5400 >> 8) & 0xff;
1146 p
[19] = 5400 & 0xff;
1149 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY
:
1151 if (page_control
== 1) { /* Changeable Values */
1154 /* Transfer rate [kbit/s], 5Mbit/s */
1157 /* if a geometry hint is available, use it */
1158 p
[2] = s
->qdev
.conf
.heads
& 0xff;
1159 p
[3] = s
->qdev
.conf
.secs
& 0xff;
1160 p
[4] = s
->qdev
.blocksize
>> 8;
1161 p
[6] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1162 p
[7] = s
->qdev
.conf
.cyls
& 0xff;
1163 /* Write precomp start cylinder, disabled */
1164 p
[8] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1165 p
[9] = s
->qdev
.conf
.cyls
& 0xff;
1166 /* Reduced current start cylinder, disabled */
1167 p
[10] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1168 p
[11] = s
->qdev
.conf
.cyls
& 0xff;
1169 /* Device step rate [100us], 100us */
1172 /* Device step pulse width [us], 1us */
1174 /* Device head settle delay [100us], 100us */
1177 /* Motor on delay [0.1s], 0.1s */
1179 /* Motor off delay [0.1s], 0.1s */
1181 /* Medium rotation rate [rpm], 5400 rpm */
1182 p
[26] = (5400 >> 8) & 0xff;
1183 p
[27] = 5400 & 0xff;
1186 case MODE_PAGE_CACHING
:
1188 if (page_control
== 1 || /* Changeable Values */
1189 blk_enable_write_cache(s
->qdev
.conf
.blk
)) {
1194 case MODE_PAGE_R_W_ERROR
:
1196 if (page_control
== 1) { /* Changeable Values */
1197 if (s
->qdev
.type
== TYPE_ROM
) {
1198 /* Automatic Write Reallocation Enabled */
1203 p
[0] = 0x80; /* Automatic Write Reallocation Enabled */
1204 if (s
->qdev
.type
== TYPE_ROM
) {
1205 p
[1] = 0x20; /* Read Retry Count */
1209 case MODE_PAGE_AUDIO_CTL
:
1213 case MODE_PAGE_CAPABILITIES
:
1215 if (page_control
== 1) { /* Changeable Values */
1219 p
[0] = 0x3b; /* CD-R & CD-RW read */
1220 p
[1] = 0; /* Writing not supported */
1221 p
[2] = 0x7f; /* Audio, composite, digital out,
1222 mode 2 form 1&2, multi session */
1223 p
[3] = 0xff; /* CD DA, DA accurate, RW supported,
1224 RW corrected, C2 errors, ISRC,
1226 p
[4] = 0x2d | (s
->tray_locked
? 2 : 0);
1227 /* Locking supported, jumper present, eject, tray */
1228 p
[5] = 0; /* no volume & mute control, no
1230 p
[6] = (50 * 176) >> 8; /* 50x read speed */
1231 p
[7] = (50 * 176) & 0xff;
1232 p
[8] = 2 >> 8; /* Two volume levels */
1234 p
[10] = 2048 >> 8; /* 2M buffer */
1235 p
[11] = 2048 & 0xff;
1236 p
[12] = (16 * 176) >> 8; /* 16x read speed current */
1237 p
[13] = (16 * 176) & 0xff;
1238 p
[16] = (16 * 176) >> 8; /* 16x write speed */
1239 p
[17] = (16 * 176) & 0xff;
1240 p
[18] = (16 * 176) >> 8; /* 16x write speed current */
1241 p
[19] = (16 * 176) & 0xff;
1244 case MODE_PAGE_APPLE_VENDOR
:
1245 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR
)) {
1247 if (page_control
== 1) { /* Changeable Values */
1251 memset(p
, 0, length
);
1252 strcpy((char *)p
+ 8, "APPLE COMPUTER, INC ");
1258 case MODE_PAGE_VENDOR_SPECIFIC
:
1259 if (s
->qdev
.type
== TYPE_DISK
&& (s
->quirks
&
1260 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
))) {
1262 if (page_control
== 1) { /* Changeable Values */
1278 assert(length
< 256);
1279 (*p_outbuf
)[0] = page
;
1280 (*p_outbuf
)[1] = length
;
1281 *p_outbuf
+= length
+ 2;
1285 static int scsi_disk_emulate_mode_sense(SCSIDiskReq
*r
, uint8_t *outbuf
)
1287 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1288 uint64_t nb_sectors
;
1290 int page
, buflen
, ret
, page_control
;
1292 uint8_t dev_specific_param
;
1294 dbd
= (r
->req
.cmd
.buf
[1] & 0x8) != 0;
1295 page
= r
->req
.cmd
.buf
[2] & 0x3f;
1296 page_control
= (r
->req
.cmd
.buf
[2] & 0xc0) >> 6;
1298 trace_scsi_disk_emulate_mode_sense((r
->req
.cmd
.buf
[0] == MODE_SENSE
) ? 6 :
1299 10, page
, r
->req
.cmd
.xfer
, page_control
);
1300 memset(outbuf
, 0, r
->req
.cmd
.xfer
);
1303 if (s
->qdev
.type
== TYPE_DISK
) {
1304 dev_specific_param
= s
->features
& (1 << SCSI_DISK_F_DPOFUA
) ? 0x10 : 0;
1305 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1306 dev_specific_param
|= 0x80; /* Readonly. */
1309 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD
)) {
1310 /* Use DBD from the request... */
1311 dev_specific_param
= 0x00;
1314 * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
1315 * which should never return a block descriptor even though DBD is
1316 * not set, otherwise CDROM detection fails in MacOS
1318 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR
) &&
1319 page
== MODE_PAGE_APPLE_VENDOR
) {
1324 * MMC prescribes that CD/DVD drives have no block descriptors,
1325 * and defines no device-specific parameter.
1327 dev_specific_param
= 0x00;
1332 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1333 p
[1] = 0; /* Default media type. */
1334 p
[2] = dev_specific_param
;
1335 p
[3] = 0; /* Block descriptor length. */
1337 } else { /* MODE_SENSE_10 */
1338 p
[2] = 0; /* Default media type. */
1339 p
[3] = dev_specific_param
;
1340 p
[6] = p
[7] = 0; /* Block descriptor length. */
1344 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
1345 if (!dbd
&& nb_sectors
) {
1346 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1347 outbuf
[3] = 8; /* Block descriptor length */
1348 } else { /* MODE_SENSE_10 */
1349 outbuf
[7] = 8; /* Block descriptor length */
1351 nb_sectors
/= (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1352 if (nb_sectors
> 0xffffff) {
1355 p
[0] = 0; /* media density code */
1356 p
[1] = (nb_sectors
>> 16) & 0xff;
1357 p
[2] = (nb_sectors
>> 8) & 0xff;
1358 p
[3] = nb_sectors
& 0xff;
1359 p
[4] = 0; /* reserved */
1360 p
[5] = 0; /* bytes 5-7 are the sector size in bytes */
1361 p
[6] = s
->qdev
.blocksize
>> 8;
1366 if (page_control
== 3) {
1368 scsi_check_condition(r
, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED
));
1373 for (page
= 0; page
<= 0x3e; page
++) {
1374 mode_sense_page(s
, page
, &p
, page_control
);
1377 ret
= mode_sense_page(s
, page
, &p
, page_control
);
1383 buflen
= p
- outbuf
;
1385 * The mode data length field specifies the length in bytes of the
1386 * following data that is available to be transferred. The mode data
1387 * length does not include itself.
1389 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1390 outbuf
[0] = buflen
- 1;
1391 } else { /* MODE_SENSE_10 */
1392 outbuf
[0] = ((buflen
- 2) >> 8) & 0xff;
1393 outbuf
[1] = (buflen
- 2) & 0xff;
1398 static int scsi_disk_emulate_read_toc(SCSIRequest
*req
, uint8_t *outbuf
)
1400 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1401 int start_track
, format
, msf
, toclen
;
1402 uint64_t nb_sectors
;
1404 msf
= req
->cmd
.buf
[1] & 2;
1405 format
= req
->cmd
.buf
[2] & 0xf;
1406 start_track
= req
->cmd
.buf
[6];
1407 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
1408 trace_scsi_disk_emulate_read_toc(start_track
, format
, msf
>> 1);
1409 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
1412 toclen
= cdrom_read_toc(nb_sectors
, outbuf
, msf
, start_track
);
1415 /* multi session : only a single session defined */
1417 memset(outbuf
, 0, 12);
1423 toclen
= cdrom_read_toc_raw(nb_sectors
, outbuf
, msf
, start_track
);
1431 static int scsi_disk_emulate_start_stop(SCSIDiskReq
*r
)
1433 SCSIRequest
*req
= &r
->req
;
1434 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1435 bool start
= req
->cmd
.buf
[4] & 1;
1436 bool loej
= req
->cmd
.buf
[4] & 2; /* load on start, eject on !start */
1437 int pwrcnd
= req
->cmd
.buf
[4] & 0xf0;
1440 /* eject/load only happens for power condition == 0 */
1444 if ((s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) && loej
) {
1445 if (!start
&& !s
->tray_open
&& s
->tray_locked
) {
1446 scsi_check_condition(r
,
1447 blk_is_inserted(s
->qdev
.conf
.blk
)
1448 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED
)
1449 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED
));
1453 if (s
->tray_open
!= !start
) {
1454 blk_eject(s
->qdev
.conf
.blk
, !start
);
1455 s
->tray_open
= !start
;
1461 static void scsi_disk_emulate_read_data(SCSIRequest
*req
)
1463 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1464 int buflen
= r
->iov
.iov_len
;
1467 trace_scsi_disk_emulate_read_data(buflen
);
1470 scsi_req_data(&r
->req
, buflen
);
1474 /* This also clears the sense buffer for REQUEST SENSE. */
1475 scsi_req_complete(&r
->req
, GOOD
);
1478 static int scsi_disk_check_mode_select(SCSIDiskState
*s
, int page
,
1479 uint8_t *inbuf
, int inlen
)
1481 uint8_t mode_current
[SCSI_MAX_MODE_LEN
];
1482 uint8_t mode_changeable
[SCSI_MAX_MODE_LEN
];
1484 int len
, expected_len
, changeable_len
, i
;
1486 /* The input buffer does not include the page header, so it is
1489 expected_len
= inlen
+ 2;
1490 if (expected_len
> SCSI_MAX_MODE_LEN
) {
1494 /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
1495 if (page
== MODE_PAGE_ALLS
) {
1500 memset(mode_current
, 0, inlen
+ 2);
1501 len
= mode_sense_page(s
, page
, &p
, 0);
1502 if (len
< 0 || len
!= expected_len
) {
1506 p
= mode_changeable
;
1507 memset(mode_changeable
, 0, inlen
+ 2);
1508 changeable_len
= mode_sense_page(s
, page
, &p
, 1);
1509 assert(changeable_len
== len
);
1511 /* Check that unchangeable bits are the same as what MODE SENSE
1514 for (i
= 2; i
< len
; i
++) {
1515 if (((mode_current
[i
] ^ inbuf
[i
- 2]) & ~mode_changeable
[i
]) != 0) {
1522 static void scsi_disk_apply_mode_select(SCSIDiskState
*s
, int page
, uint8_t *p
)
1525 case MODE_PAGE_CACHING
:
1526 blk_set_enable_write_cache(s
->qdev
.conf
.blk
, (p
[0] & 4) != 0);
1534 static int mode_select_pages(SCSIDiskReq
*r
, uint8_t *p
, int len
, bool change
)
1536 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1539 int page
, subpage
, page_len
;
1541 /* Parse both possible formats for the mode page headers. */
1545 goto invalid_param_len
;
1548 page_len
= lduw_be_p(&p
[2]);
1553 goto invalid_param_len
;
1564 if (page_len
> len
) {
1565 if (!(s
->quirks
& SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED
)) {
1566 goto invalid_param_len
;
1568 trace_scsi_disk_mode_select_page_truncated(page
, page_len
, len
);
1572 if (scsi_disk_check_mode_select(s
, page
, p
, page_len
) < 0) {
1576 scsi_disk_apply_mode_select(s
, page
, p
);
1585 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM
));
1589 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1593 static void scsi_disk_emulate_mode_select(SCSIDiskReq
*r
, uint8_t *inbuf
)
1595 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1597 int cmd
= r
->req
.cmd
.buf
[0];
1598 int len
= r
->req
.cmd
.xfer
;
1599 int hdr_len
= (cmd
== MODE_SELECT
? 4 : 8);
1603 if ((r
->req
.cmd
.buf
[1] & 0x11) != 0x10) {
1605 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
))) {
1606 /* We only support PF=1, SP=0. */
1611 if (len
< hdr_len
) {
1612 goto invalid_param_len
;
1615 bd_len
= (cmd
== MODE_SELECT
? p
[3] : lduw_be_p(&p
[6]));
1619 goto invalid_param_len
;
1621 if (bd_len
!= 0 && bd_len
!= 8) {
1625 /* Allow changing the block size */
1627 bs
= p
[5] << 16 | p
[6] << 8 | p
[7];
1630 * Since the existing code only checks/updates bits 8-15 of the block
1631 * size, restrict ourselves to the same requirement for now to ensure
1632 * that a block size set by a block descriptor and then read back by
1633 * a subsequent SCSI command will be the same
1635 if (bs
&& !(bs
& ~0xff00) && bs
!= s
->qdev
.blocksize
) {
1636 s
->qdev
.blocksize
= bs
;
1637 trace_scsi_disk_mode_select_set_blocksize(s
->qdev
.blocksize
);
1644 /* Ensure no change is made if there is an error! */
1645 for (pass
= 0; pass
< 2; pass
++) {
1646 if (mode_select_pages(r
, p
, len
, pass
== 1) < 0) {
1651 if (!blk_enable_write_cache(s
->qdev
.conf
.blk
)) {
1652 /* The request is used as the AIO opaque value, so add a ref. */
1653 scsi_req_ref(&r
->req
);
1654 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
1656 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_aio_complete
, r
);
1660 scsi_req_complete(&r
->req
, GOOD
);
1664 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM
));
1668 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1672 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1675 /* sector_num and nb_sectors expected to be in qdev blocksize */
1676 static inline bool check_lba_range(SCSIDiskState
*s
,
1677 uint64_t sector_num
, uint32_t nb_sectors
)
1680 * The first line tests that no overflow happens when computing the last
1681 * sector. The second line tests that the last accessed sector is in
1684 * Careful, the computations should not underflow for nb_sectors == 0,
1685 * and a 0-block read to the first LBA beyond the end of device is
1688 return (sector_num
<= sector_num
+ nb_sectors
&&
1689 sector_num
+ nb_sectors
<= s
->qdev
.max_lba
+ 1);
1692 typedef struct UnmapCBData
{
1698 static void scsi_unmap_complete(void *opaque
, int ret
);
1700 static void scsi_unmap_complete_noio(UnmapCBData
*data
, int ret
)
1702 SCSIDiskReq
*r
= data
->r
;
1703 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1705 assert(r
->req
.aiocb
== NULL
);
1707 if (data
->count
> 0) {
1708 uint64_t sector_num
= ldq_be_p(&data
->inbuf
[0]);
1709 uint32_t nb_sectors
= ldl_be_p(&data
->inbuf
[8]) & 0xffffffffULL
;
1710 r
->sector
= sector_num
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1711 r
->sector_count
= nb_sectors
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1713 if (!check_lba_range(s
, sector_num
, nb_sectors
)) {
1714 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
),
1716 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
1720 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1721 r
->sector_count
* BDRV_SECTOR_SIZE
,
1724 r
->req
.aiocb
= blk_aio_pdiscard(s
->qdev
.conf
.blk
,
1725 r
->sector
* BDRV_SECTOR_SIZE
,
1726 r
->sector_count
* BDRV_SECTOR_SIZE
,
1727 scsi_unmap_complete
, data
);
1733 scsi_req_complete(&r
->req
, GOOD
);
1736 scsi_req_unref(&r
->req
);
1740 static void scsi_unmap_complete(void *opaque
, int ret
)
1742 UnmapCBData
*data
= opaque
;
1743 SCSIDiskReq
*r
= data
->r
;
1744 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1746 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
1748 assert(r
->req
.aiocb
!= NULL
);
1749 r
->req
.aiocb
= NULL
;
1751 if (scsi_disk_req_check_error(r
, ret
, true)) {
1752 scsi_req_unref(&r
->req
);
1755 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
1756 scsi_unmap_complete_noio(data
, ret
);
1758 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1761 static void scsi_disk_emulate_unmap(SCSIDiskReq
*r
, uint8_t *inbuf
)
1763 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1765 int len
= r
->req
.cmd
.xfer
;
1768 /* Reject ANCHOR=1. */
1769 if (r
->req
.cmd
.buf
[1] & 0x1) {
1774 goto invalid_param_len
;
1776 if (len
< lduw_be_p(&p
[0]) + 2) {
1777 goto invalid_param_len
;
1779 if (len
< lduw_be_p(&p
[2]) + 8) {
1780 goto invalid_param_len
;
1782 if (lduw_be_p(&p
[2]) & 15) {
1783 goto invalid_param_len
;
1786 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1787 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1788 scsi_check_condition(r
, SENSE_CODE(WRITE_PROTECTED
));
1792 data
= g_new0(UnmapCBData
, 1);
1794 data
->inbuf
= &p
[8];
1795 data
->count
= lduw_be_p(&p
[2]) >> 4;
1797 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1798 scsi_req_ref(&r
->req
);
1799 scsi_unmap_complete_noio(data
, 0);
1803 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1804 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1808 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1809 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1812 typedef struct WriteSameCBData
{
1820 static void scsi_write_same_complete(void *opaque
, int ret
)
1822 WriteSameCBData
*data
= opaque
;
1823 SCSIDiskReq
*r
= data
->r
;
1824 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1826 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
1828 assert(r
->req
.aiocb
!= NULL
);
1829 r
->req
.aiocb
= NULL
;
1831 if (scsi_disk_req_check_error(r
, ret
, true)) {
1835 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
1837 data
->nb_sectors
-= data
->iov
.iov_len
/ BDRV_SECTOR_SIZE
;
1838 data
->sector
+= data
->iov
.iov_len
/ BDRV_SECTOR_SIZE
;
1839 data
->iov
.iov_len
= MIN(data
->nb_sectors
* BDRV_SECTOR_SIZE
,
1841 if (data
->iov
.iov_len
) {
1842 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1843 data
->iov
.iov_len
, BLOCK_ACCT_WRITE
);
1844 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1845 * where final qiov may need smaller size */
1846 qemu_iovec_init_external(&data
->qiov
, &data
->iov
, 1);
1847 r
->req
.aiocb
= blk_aio_pwritev(s
->qdev
.conf
.blk
,
1848 data
->sector
<< BDRV_SECTOR_BITS
,
1850 scsi_write_same_complete
, data
);
1851 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1855 scsi_req_complete(&r
->req
, GOOD
);
1858 scsi_req_unref(&r
->req
);
1859 qemu_vfree(data
->iov
.iov_base
);
1861 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1864 static void scsi_disk_emulate_write_same(SCSIDiskReq
*r
, uint8_t *inbuf
)
1866 SCSIRequest
*req
= &r
->req
;
1867 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1868 uint32_t nb_sectors
= scsi_data_cdb_xfer(r
->req
.cmd
.buf
);
1869 WriteSameCBData
*data
;
1873 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1874 if (nb_sectors
== 0 || (req
->cmd
.buf
[1] & 0x16)) {
1875 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1879 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1880 scsi_check_condition(r
, SENSE_CODE(WRITE_PROTECTED
));
1883 if (!check_lba_range(s
, r
->req
.cmd
.lba
, nb_sectors
)) {
1884 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
1888 if ((req
->cmd
.buf
[1] & 0x1) || buffer_is_zero(inbuf
, s
->qdev
.blocksize
)) {
1889 int flags
= (req
->cmd
.buf
[1] & 0x8) ? BDRV_REQ_MAY_UNMAP
: 0;
1891 /* The request is used as the AIO opaque value, so add a ref. */
1892 scsi_req_ref(&r
->req
);
1893 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1894 nb_sectors
* s
->qdev
.blocksize
,
1896 r
->req
.aiocb
= blk_aio_pwrite_zeroes(s
->qdev
.conf
.blk
,
1897 r
->req
.cmd
.lba
* s
->qdev
.blocksize
,
1898 nb_sectors
* s
->qdev
.blocksize
,
1899 flags
, scsi_aio_complete
, r
);
1903 data
= g_new0(WriteSameCBData
, 1);
1905 data
->sector
= r
->req
.cmd
.lba
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1906 data
->nb_sectors
= nb_sectors
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1907 data
->iov
.iov_len
= MIN(data
->nb_sectors
* BDRV_SECTOR_SIZE
,
1908 SCSI_WRITE_SAME_MAX
);
1909 data
->iov
.iov_base
= buf
= blk_blockalign(s
->qdev
.conf
.blk
,
1911 qemu_iovec_init_external(&data
->qiov
, &data
->iov
, 1);
1913 for (i
= 0; i
< data
->iov
.iov_len
; i
+= l
) {
1914 l
= MIN(s
->qdev
.blocksize
, data
->iov
.iov_len
- i
);
1915 memcpy(&buf
[i
], inbuf
, l
);
1918 scsi_req_ref(&r
->req
);
1919 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1920 data
->iov
.iov_len
, BLOCK_ACCT_WRITE
);
1921 r
->req
.aiocb
= blk_aio_pwritev(s
->qdev
.conf
.blk
,
1922 data
->sector
<< BDRV_SECTOR_BITS
,
1924 scsi_write_same_complete
, data
);
1927 static void scsi_disk_emulate_write_data(SCSIRequest
*req
)
1929 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1931 if (r
->iov
.iov_len
) {
1932 int buflen
= r
->iov
.iov_len
;
1933 trace_scsi_disk_emulate_write_data(buflen
);
1935 scsi_req_data(&r
->req
, buflen
);
1939 switch (req
->cmd
.buf
[0]) {
1941 case MODE_SELECT_10
:
1942 /* This also clears the sense buffer for REQUEST SENSE. */
1943 scsi_disk_emulate_mode_select(r
, r
->iov
.iov_base
);
1947 scsi_disk_emulate_unmap(r
, r
->iov
.iov_base
);
1953 if (r
->req
.status
== -1) {
1954 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1960 scsi_disk_emulate_write_same(r
, r
->iov
.iov_base
);
1968 static int32_t scsi_disk_emulate_command(SCSIRequest
*req
, uint8_t *buf
)
1970 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1971 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1972 uint64_t nb_sectors
;
1976 switch (req
->cmd
.buf
[0]) {
1985 case ALLOW_MEDIUM_REMOVAL
:
1986 case GET_CONFIGURATION
:
1987 case GET_EVENT_STATUS_NOTIFICATION
:
1988 case MECHANISM_STATUS
:
1993 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
1994 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
2001 * FIXME: we shouldn't return anything bigger than 4k, but the code
2002 * requires the buffer to be as big as req->cmd.xfer in several
2003 * places. So, do not allow CDBs with a very large ALLOCATION
2004 * LENGTH. The real fix would be to modify scsi_read_data and
2005 * dma_buf_read, so that they return data beyond the buflen
2008 if (req
->cmd
.xfer
> 65536) {
2009 goto illegal_request
;
2011 r
->buflen
= MAX(4096, req
->cmd
.xfer
);
2013 if (!r
->iov
.iov_base
) {
2014 r
->iov
.iov_base
= blk_blockalign(s
->qdev
.conf
.blk
, r
->buflen
);
2017 outbuf
= r
->iov
.iov_base
;
2018 memset(outbuf
, 0, r
->buflen
);
2019 switch (req
->cmd
.buf
[0]) {
2020 case TEST_UNIT_READY
:
2021 assert(blk_is_available(s
->qdev
.conf
.blk
));
2024 buflen
= scsi_disk_emulate_inquiry(req
, outbuf
);
2026 goto illegal_request
;
2031 buflen
= scsi_disk_emulate_mode_sense(r
, outbuf
);
2033 goto illegal_request
;
2037 buflen
= scsi_disk_emulate_read_toc(req
, outbuf
);
2039 goto illegal_request
;
2043 if (req
->cmd
.buf
[1] & 1) {
2044 goto illegal_request
;
2048 if (req
->cmd
.buf
[1] & 3) {
2049 goto illegal_request
;
2053 if (req
->cmd
.buf
[1] & 1) {
2054 goto illegal_request
;
2058 if (req
->cmd
.buf
[1] & 3) {
2059 goto illegal_request
;
2063 if (scsi_disk_emulate_start_stop(r
) < 0) {
2067 case ALLOW_MEDIUM_REMOVAL
:
2068 s
->tray_locked
= req
->cmd
.buf
[4] & 1;
2069 blk_lock_medium(s
->qdev
.conf
.blk
, req
->cmd
.buf
[4] & 1);
2071 case READ_CAPACITY_10
:
2072 /* The normal LEN field for this command is zero. */
2073 memset(outbuf
, 0, 8);
2074 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
2076 scsi_check_condition(r
, SENSE_CODE(LUN_NOT_READY
));
2079 if ((req
->cmd
.buf
[8] & 1) == 0 && req
->cmd
.lba
) {
2080 goto illegal_request
;
2082 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
2083 /* Returned value is the address of the last sector. */
2085 /* Remember the new size for read/write sanity checking. */
2086 s
->qdev
.max_lba
= nb_sectors
;
2087 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
2088 if (nb_sectors
> UINT32_MAX
) {
2089 nb_sectors
= UINT32_MAX
;
2091 outbuf
[0] = (nb_sectors
>> 24) & 0xff;
2092 outbuf
[1] = (nb_sectors
>> 16) & 0xff;
2093 outbuf
[2] = (nb_sectors
>> 8) & 0xff;
2094 outbuf
[3] = nb_sectors
& 0xff;
2097 outbuf
[6] = s
->qdev
.blocksize
>> 8;
2101 /* Just return "NO SENSE". */
2102 buflen
= scsi_convert_sense(NULL
, 0, outbuf
, r
->buflen
,
2103 (req
->cmd
.buf
[1] & 1) == 0);
2105 goto illegal_request
;
2108 case MECHANISM_STATUS
:
2109 buflen
= scsi_emulate_mechanism_status(s
, outbuf
);
2111 goto illegal_request
;
2114 case GET_CONFIGURATION
:
2115 buflen
= scsi_get_configuration(s
, outbuf
);
2117 goto illegal_request
;
2120 case GET_EVENT_STATUS_NOTIFICATION
:
2121 buflen
= scsi_get_event_status_notification(s
, r
, outbuf
);
2123 goto illegal_request
;
2126 case READ_DISC_INFORMATION
:
2127 buflen
= scsi_read_disc_information(s
, r
, outbuf
);
2129 goto illegal_request
;
2132 case READ_DVD_STRUCTURE
:
2133 buflen
= scsi_read_dvd_structure(s
, r
, outbuf
);
2135 goto illegal_request
;
2138 case SERVICE_ACTION_IN_16
:
2139 /* Service Action In subcommands. */
2140 if ((req
->cmd
.buf
[1] & 31) == SAI_READ_CAPACITY_16
) {
2141 trace_scsi_disk_emulate_command_SAI_16();
2142 memset(outbuf
, 0, req
->cmd
.xfer
);
2143 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
2145 scsi_check_condition(r
, SENSE_CODE(LUN_NOT_READY
));
2148 if ((req
->cmd
.buf
[14] & 1) == 0 && req
->cmd
.lba
) {
2149 goto illegal_request
;
2151 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
2152 /* Returned value is the address of the last sector. */
2154 /* Remember the new size for read/write sanity checking. */
2155 s
->qdev
.max_lba
= nb_sectors
;
2156 outbuf
[0] = (nb_sectors
>> 56) & 0xff;
2157 outbuf
[1] = (nb_sectors
>> 48) & 0xff;
2158 outbuf
[2] = (nb_sectors
>> 40) & 0xff;
2159 outbuf
[3] = (nb_sectors
>> 32) & 0xff;
2160 outbuf
[4] = (nb_sectors
>> 24) & 0xff;
2161 outbuf
[5] = (nb_sectors
>> 16) & 0xff;
2162 outbuf
[6] = (nb_sectors
>> 8) & 0xff;
2163 outbuf
[7] = nb_sectors
& 0xff;
2166 outbuf
[10] = s
->qdev
.blocksize
>> 8;
2169 outbuf
[13] = get_physical_block_exp(&s
->qdev
.conf
);
2171 /* set TPE bit if the format supports discard */
2172 if (s
->qdev
.conf
.discard_granularity
) {
2176 /* Protection, exponent and lowest lba field left blank. */
2179 trace_scsi_disk_emulate_command_SAI_unsupported();
2180 goto illegal_request
;
2181 case SYNCHRONIZE_CACHE
:
2182 /* The request is used as the AIO opaque value, so add a ref. */
2183 scsi_req_ref(&r
->req
);
2184 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
2186 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_aio_complete
, r
);
2189 trace_scsi_disk_emulate_command_SEEK_10(r
->req
.cmd
.lba
);
2190 if (r
->req
.cmd
.lba
> s
->qdev
.max_lba
) {
2195 trace_scsi_disk_emulate_command_MODE_SELECT(r
->req
.cmd
.xfer
);
2197 case MODE_SELECT_10
:
2198 trace_scsi_disk_emulate_command_MODE_SELECT_10(r
->req
.cmd
.xfer
);
2201 trace_scsi_disk_emulate_command_UNMAP(r
->req
.cmd
.xfer
);
2206 trace_scsi_disk_emulate_command_VERIFY((req
->cmd
.buf
[1] >> 1) & 3);
2207 if (req
->cmd
.buf
[1] & 6) {
2208 goto illegal_request
;
2213 trace_scsi_disk_emulate_command_WRITE_SAME(
2214 req
->cmd
.buf
[0] == WRITE_SAME_10
? 10 : 16, r
->req
.cmd
.xfer
);
2217 trace_scsi_disk_emulate_command_FORMAT_UNIT(r
->req
.cmd
.xfer
);
2220 trace_scsi_disk_emulate_command_UNKNOWN(buf
[0],
2221 scsi_command_name(buf
[0]));
2222 scsi_check_condition(r
, SENSE_CODE(INVALID_OPCODE
));
2225 assert(!r
->req
.aiocb
);
2226 r
->iov
.iov_len
= MIN(r
->buflen
, req
->cmd
.xfer
);
2227 if (r
->iov
.iov_len
== 0) {
2228 scsi_req_complete(&r
->req
, GOOD
);
2230 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
2231 assert(r
->iov
.iov_len
== req
->cmd
.xfer
);
2232 return -r
->iov
.iov_len
;
2234 return r
->iov
.iov_len
;
2238 if (r
->req
.status
== -1) {
2239 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
2244 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
2248 /* Execute a scsi command. Returns the length of the data expected by the
2249 command. This will be Positive for data transfers from the device
2250 (eg. disk reads), negative for transfers to the device (eg. disk writes),
2251 and zero if the command does not transfer any data. */
2253 static int32_t scsi_disk_dma_command(SCSIRequest
*req
, uint8_t *buf
)
2255 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
2256 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
2257 SCSIDiskClass
*sdc
= (SCSIDiskClass
*) object_get_class(OBJECT(s
));
2263 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
2264 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
2268 len
= scsi_data_cdb_xfer(r
->req
.cmd
.buf
);
2274 trace_scsi_disk_dma_command_READ(r
->req
.cmd
.lba
, len
);
2275 /* Protection information is not supported. For SCSI versions 2 and
2276 * older (as determined by snooping the guest's INQUIRY commands),
2277 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2279 if (s
->qdev
.scsi_version
> 2 && (r
->req
.cmd
.buf
[1] & 0xe0)) {
2280 goto illegal_request
;
2282 if (!check_lba_range(s
, r
->req
.cmd
.lba
, len
)) {
2285 r
->sector
= r
->req
.cmd
.lba
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
2286 r
->sector_count
= len
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
2292 case WRITE_VERIFY_10
:
2293 case WRITE_VERIFY_12
:
2294 case WRITE_VERIFY_16
:
2295 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
2296 scsi_check_condition(r
, SENSE_CODE(WRITE_PROTECTED
));
2299 trace_scsi_disk_dma_command_WRITE(
2300 (command
& 0xe) == 0xe ? "And Verify " : "",
2301 r
->req
.cmd
.lba
, len
);
2306 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2307 * As far as DMA is concerned, we can treat it the same as a write;
2308 * scsi_block_do_sgio will send VERIFY commands.
2310 if (s
->qdev
.scsi_version
> 2 && (r
->req
.cmd
.buf
[1] & 0xe0)) {
2311 goto illegal_request
;
2313 if (!check_lba_range(s
, r
->req
.cmd
.lba
, len
)) {
2316 r
->sector
= r
->req
.cmd
.lba
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
2317 r
->sector_count
= len
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
2322 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
2325 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
2328 r
->need_fua_emulation
= sdc
->need_fua_emulation(&r
->req
.cmd
);
2329 if (r
->sector_count
== 0) {
2330 scsi_req_complete(&r
->req
, GOOD
);
2332 assert(r
->iov
.iov_len
== 0);
2333 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
2334 return -r
->sector_count
* BDRV_SECTOR_SIZE
;
2336 return r
->sector_count
* BDRV_SECTOR_SIZE
;
2340 static void scsi_disk_reset(DeviceState
*dev
)
2342 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
.qdev
, dev
);
2343 uint64_t nb_sectors
;
2346 scsi_device_purge_requests(&s
->qdev
, SENSE_CODE(RESET
));
2348 ctx
= blk_get_aio_context(s
->qdev
.conf
.blk
);
2349 aio_context_acquire(ctx
);
2350 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
2351 aio_context_release(ctx
);
2353 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
2357 s
->qdev
.max_lba
= nb_sectors
;
2358 /* reset tray statuses */
2362 s
->qdev
.scsi_version
= s
->qdev
.default_scsi_version
;
2365 static void scsi_disk_resize_cb(void *opaque
)
2367 SCSIDiskState
*s
= opaque
;
2369 /* SPC lists this sense code as available only for
2370 * direct-access devices.
2372 if (s
->qdev
.type
== TYPE_DISK
) {
2373 scsi_device_report_change(&s
->qdev
, SENSE_CODE(CAPACITY_CHANGED
));
2377 static void scsi_cd_change_media_cb(void *opaque
, bool load
, Error
**errp
)
2379 SCSIDiskState
*s
= opaque
;
2382 * When a CD gets changed, we have to report an ejected state and
2383 * then a loaded state to guests so that they detect tray
2384 * open/close and media change events. Guests that do not use
2385 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2386 * states rely on this behavior.
2388 * media_changed governs the state machine used for unit attention
2389 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2391 s
->media_changed
= load
;
2392 s
->tray_open
= !load
;
2393 scsi_device_set_ua(&s
->qdev
, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM
));
2394 s
->media_event
= true;
2395 s
->eject_request
= false;
2398 static void scsi_cd_eject_request_cb(void *opaque
, bool force
)
2400 SCSIDiskState
*s
= opaque
;
2402 s
->eject_request
= true;
2404 s
->tray_locked
= false;
2408 static bool scsi_cd_is_tray_open(void *opaque
)
2410 return ((SCSIDiskState
*)opaque
)->tray_open
;
2413 static bool scsi_cd_is_medium_locked(void *opaque
)
2415 return ((SCSIDiskState
*)opaque
)->tray_locked
;
/*
 * Block-backend callbacks for removable devices (CD-ROM / removable
 * disks): tray state queries plus media-change and eject notification.
 */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};
/* Fixed (non-removable) disks only react to host-initiated resize. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
2431 static void scsi_disk_unit_attention_reported(SCSIDevice
*dev
)
2433 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, dev
);
2434 if (s
->media_changed
) {
2435 s
->media_changed
= false;
2436 scsi_device_set_ua(&s
->qdev
, SENSE_CODE(MEDIUM_CHANGED
));
2440 static void scsi_realize(SCSIDevice
*dev
, Error
**errp
)
2442 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, dev
);
2445 if (!s
->qdev
.conf
.blk
) {
2446 error_setg(errp
, "drive property not set");
2450 if (!(s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) &&
2451 !blk_is_inserted(s
->qdev
.conf
.blk
)) {
2452 error_setg(errp
, "Device needs media, but drive is empty");
2456 if (!blkconf_blocksizes(&s
->qdev
.conf
, errp
)) {
2460 if (blk_get_aio_context(s
->qdev
.conf
.blk
) != qemu_get_aio_context() &&
2461 !s
->qdev
.hba_supports_iothread
)
2463 error_setg(errp
, "HBA does not support iothreads");
2467 if (dev
->type
== TYPE_DISK
) {
2468 if (!blkconf_geometry(&dev
->conf
, NULL
, 65535, 255, 255, errp
)) {
2473 read_only
= !blk_supports_write_perm(s
->qdev
.conf
.blk
);
2474 if (dev
->type
== TYPE_ROM
) {
2478 if (!blkconf_apply_backend_options(&dev
->conf
, read_only
,
2479 dev
->type
== TYPE_DISK
, errp
)) {
2483 if (s
->qdev
.conf
.discard_granularity
== -1) {
2484 s
->qdev
.conf
.discard_granularity
=
2485 MAX(s
->qdev
.conf
.logical_block_size
, DEFAULT_DISCARD_GRANULARITY
);
2489 s
->version
= g_strdup(qemu_hw_version());
2492 s
->vendor
= g_strdup("QEMU");
2494 if (!s
->device_id
) {
2496 s
->device_id
= g_strdup_printf("%.20s", s
->serial
);
2498 const char *str
= blk_name(s
->qdev
.conf
.blk
);
2500 s
->device_id
= g_strdup(str
);
2505 if (blk_is_sg(s
->qdev
.conf
.blk
)) {
2506 error_setg(errp
, "unwanted /dev/sg*");
2510 if ((s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) &&
2511 !(s
->features
& (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS
))) {
2512 blk_set_dev_ops(s
->qdev
.conf
.blk
, &scsi_disk_removable_block_ops
, s
);
2514 blk_set_dev_ops(s
->qdev
.conf
.blk
, &scsi_disk_block_ops
, s
);
2517 blk_iostatus_enable(s
->qdev
.conf
.blk
);
2519 add_boot_device_lchs(&dev
->qdev
, NULL
,
/* Undo scsi_realize(): drop the boot-order geometry registration. */
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
2530 static void scsi_hd_realize(SCSIDevice
*dev
, Error
**errp
)
2532 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, dev
);
2533 AioContext
*ctx
= NULL
;
2534 /* can happen for devices without drive. The error message for missing
2535 * backend will be issued in scsi_realize
2537 if (s
->qdev
.conf
.blk
) {
2538 ctx
= blk_get_aio_context(s
->qdev
.conf
.blk
);
2539 aio_context_acquire(ctx
);
2540 if (!blkconf_blocksizes(&s
->qdev
.conf
, errp
)) {
2544 s
->qdev
.blocksize
= s
->qdev
.conf
.logical_block_size
;
2545 s
->qdev
.type
= TYPE_DISK
;
2547 s
->product
= g_strdup("QEMU HARDDISK");
2549 scsi_realize(&s
->qdev
, errp
);
2552 aio_context_release(ctx
);
2556 static void scsi_cd_realize(SCSIDevice
*dev
, Error
**errp
)
2558 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, dev
);
2561 uint32_t blocksize
= 2048;
2563 if (!dev
->conf
.blk
) {
2564 /* Anonymous BlockBackend for an empty drive. As we put it into
2565 * dev->conf, qdev takes care of detaching on unplug. */
2566 dev
->conf
.blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
2567 ret
= blk_attach_dev(dev
->conf
.blk
, &dev
->qdev
);
2571 if (dev
->conf
.physical_block_size
!= 0) {
2572 blocksize
= dev
->conf
.physical_block_size
;
2575 ctx
= blk_get_aio_context(dev
->conf
.blk
);
2576 aio_context_acquire(ctx
);
2577 s
->qdev
.blocksize
= blocksize
;
2578 s
->qdev
.type
= TYPE_ROM
;
2579 s
->features
|= 1 << SCSI_DISK_F_REMOVABLE
;
2581 s
->product
= g_strdup("QEMU CD-ROM");
2583 scsi_realize(&s
->qdev
, errp
);
2584 aio_context_release(ctx
);
/*
 * Request ops for commands emulated entirely in this file (MODE SENSE,
 * INQUIRY, UNMAP, WRITE SAME, ...).  No load/save hooks here — only the
 * DMA path appears to support migration of in-flight requests.
 */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};
/*
 * Request ops for READ/WRITE-family commands that move data to or from
 * the block backend, including migration save/load of in-flight state.
 */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
/*
 * Opcode dispatch table: maps each supported CDB opcode to either the
 * emulation reqops or the DMA reqops.  Opcodes not listed fall back to
 * a default elsewhere (outside this table's initializer).
 */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};
2649 static void scsi_disk_new_request_dump(uint32_t lun
, uint32_t tag
, uint8_t *buf
)
2652 int len
= scsi_cdb_length(buf
);
2653 char *line_buffer
, *p
;
2655 assert(len
> 0 && len
<= 16);
2656 line_buffer
= g_malloc(len
* 5 + 1);
2658 for (i
= 0, p
= line_buffer
; i
< len
; i
++) {
2659 p
+= sprintf(p
, " 0x%02x", buf
[i
]);
2661 trace_scsi_disk_new_request(lun
, tag
, line_buffer
);
2663 g_free(line_buffer
);
2666 static SCSIRequest
*scsi_new_request(SCSIDevice
*d
, uint32_t tag
, uint32_t lun
,
2667 uint8_t *buf
, void *hba_private
)
2669 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, d
);
2671 const SCSIReqOps
*ops
;
2675 ops
= scsi_disk_reqops_dispatch
[command
];
2677 ops
= &scsi_disk_emulate_reqops
;
2679 req
= scsi_req_alloc(ops
, &s
->qdev
, tag
, lun
, hba_private
);
2681 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST
)) {
2682 scsi_disk_new_request_dump(lun
, tag
, buf
);
2689 static int get_device_type(SCSIDiskState
*s
)
2695 memset(cmd
, 0, sizeof(cmd
));
2696 memset(buf
, 0, sizeof(buf
));
2698 cmd
[4] = sizeof(buf
);
2700 ret
= scsi_SG_IO_FROM_DEV(s
->qdev
.conf
.blk
, cmd
, sizeof(cmd
),
2701 buf
, sizeof(buf
), s
->qdev
.io_timeout
);
2705 s
->qdev
.type
= buf
[0];
2706 if (buf
[1] & 0x80) {
2707 s
->features
|= 1 << SCSI_DISK_F_REMOVABLE
;
2712 static void scsi_block_realize(SCSIDevice
*dev
, Error
**errp
)
2714 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, dev
);
2719 if (!s
->qdev
.conf
.blk
) {
2720 error_setg(errp
, "drive property not set");
2724 if (s
->rotation_rate
) {
2725 error_report_once("rotation_rate is specified for scsi-block but is "
2726 "not implemented. This option is deprecated and will "
2727 "be removed in a future version");
2730 ctx
= blk_get_aio_context(s
->qdev
.conf
.blk
);
2731 aio_context_acquire(ctx
);
2733 /* check we are using a driver managing SG_IO (version 3 and after) */
2734 rc
= blk_ioctl(s
->qdev
.conf
.blk
, SG_GET_VERSION_NUM
, &sg_version
);
2736 error_setg_errno(errp
, -rc
, "cannot get SG_IO version number");
2738 error_append_hint(errp
, "Is this a SCSI device?\n");
2742 if (sg_version
< 30000) {
2743 error_setg(errp
, "scsi generic interface too old");
2747 /* get device type from INQUIRY data */
2748 rc
= get_device_type(s
);
2750 error_setg(errp
, "INQUIRY failed");
2754 /* Make a guess for the block size, we'll fix it when the guest sends.
2755 * READ CAPACITY. If they don't, they likely would assume these sizes
2756 * anyway. (TODO: check in /sys).
2758 if (s
->qdev
.type
== TYPE_ROM
|| s
->qdev
.type
== TYPE_WORM
) {
2759 s
->qdev
.blocksize
= 2048;
2761 s
->qdev
.blocksize
= 512;
2764 /* Makes the scsi-block device not removable by using HMP and QMP eject
2767 s
->features
|= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS
);
2769 scsi_realize(&s
->qdev
, errp
);
2770 scsi_generic_read_device_inquiry(&s
->qdev
);
2773 aio_context_release(ctx
);
2776 typedef struct SCSIBlockReq
{
2778 sg_io_hdr_t io_header
;
2780 /* Selected bytes of the original CDB, copied into our own CDB. */
2781 uint8_t cmd
, cdb1
, group_number
;
2783 /* CDB passed to SG_IO. */
2785 BlockCompletionFunc
*cb
;
2789 static void scsi_block_sgio_complete(void *opaque
, int ret
)
2791 SCSIBlockReq
*req
= (SCSIBlockReq
*)opaque
;
2792 SCSIDiskReq
*r
= &req
->req
;
2793 SCSIDevice
*s
= r
->req
.dev
;
2794 sg_io_hdr_t
*io_hdr
= &req
->io_header
;
2797 if (io_hdr
->host_status
!= SCSI_HOST_OK
) {
2798 scsi_req_complete_failed(&r
->req
, io_hdr
->host_status
);
2799 scsi_req_unref(&r
->req
);
2803 if (io_hdr
->driver_status
& SG_ERR_DRIVER_TIMEOUT
) {
2806 ret
= io_hdr
->status
;
2810 aio_context_acquire(blk_get_aio_context(s
->conf
.blk
));
2811 if (scsi_handle_rw_error(r
, ret
, true)) {
2812 aio_context_release(blk_get_aio_context(s
->conf
.blk
));
2813 scsi_req_unref(&r
->req
);
2816 aio_context_release(blk_get_aio_context(s
->conf
.blk
));
2823 req
->cb(req
->cb_opaque
, ret
);
2826 static BlockAIOCB
*scsi_block_do_sgio(SCSIBlockReq
*req
,
2827 int64_t offset
, QEMUIOVector
*iov
,
2829 BlockCompletionFunc
*cb
, void *opaque
)
2831 sg_io_hdr_t
*io_header
= &req
->io_header
;
2832 SCSIDiskReq
*r
= &req
->req
;
2833 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
2834 int nb_logical_blocks
;
2838 /* This is not supported yet. It can only happen if the guest does
2839 * reads and writes that are not aligned to one logical sectors
2840 * _and_ cover multiple MemoryRegions.
2842 assert(offset
% s
->qdev
.blocksize
== 0);
2843 assert(iov
->size
% s
->qdev
.blocksize
== 0);
2845 io_header
->interface_id
= 'S';
2847 /* The data transfer comes from the QEMUIOVector. */
2848 io_header
->dxfer_direction
= direction
;
2849 io_header
->dxfer_len
= iov
->size
;
2850 io_header
->dxferp
= (void *)iov
->iov
;
2851 io_header
->iovec_count
= iov
->niov
;
2852 assert(io_header
->iovec_count
== iov
->niov
); /* no overflow! */
2854 /* Build a new CDB with the LBA and length patched in, in case
2855 * DMA helpers split the transfer in multiple segments. Do not
2856 * build a CDB smaller than what the guest wanted, and only build
2857 * a larger one if strictly necessary.
2859 io_header
->cmdp
= req
->cdb
;
2860 lba
= offset
/ s
->qdev
.blocksize
;
2861 nb_logical_blocks
= io_header
->dxfer_len
/ s
->qdev
.blocksize
;
2863 if ((req
->cmd
>> 5) == 0 && lba
<= 0x1ffff) {
2865 stl_be_p(&req
->cdb
[0], lba
| (req
->cmd
<< 24));
2866 req
->cdb
[4] = nb_logical_blocks
;
2868 io_header
->cmd_len
= 6;
2869 } else if ((req
->cmd
>> 5) <= 1 && lba
<= 0xffffffffULL
) {
2871 req
->cdb
[0] = (req
->cmd
& 0x1f) | 0x20;
2872 req
->cdb
[1] = req
->cdb1
;
2873 stl_be_p(&req
->cdb
[2], lba
);
2874 req
->cdb
[6] = req
->group_number
;
2875 stw_be_p(&req
->cdb
[7], nb_logical_blocks
);
2877 io_header
->cmd_len
= 10;
2878 } else if ((req
->cmd
>> 5) != 4 && lba
<= 0xffffffffULL
) {
2880 req
->cdb
[0] = (req
->cmd
& 0x1f) | 0xA0;
2881 req
->cdb
[1] = req
->cdb1
;
2882 stl_be_p(&req
->cdb
[2], lba
);
2883 stl_be_p(&req
->cdb
[6], nb_logical_blocks
);
2884 req
->cdb
[10] = req
->group_number
;
2886 io_header
->cmd_len
= 12;
2889 req
->cdb
[0] = (req
->cmd
& 0x1f) | 0x80;
2890 req
->cdb
[1] = req
->cdb1
;
2891 stq_be_p(&req
->cdb
[2], lba
);
2892 stl_be_p(&req
->cdb
[10], nb_logical_blocks
);
2893 req
->cdb
[14] = req
->group_number
;
2895 io_header
->cmd_len
= 16;
2898 /* The rest is as in scsi-generic.c. */
2899 io_header
->mx_sb_len
= sizeof(r
->req
.sense
);
2900 io_header
->sbp
= r
->req
.sense
;
2901 io_header
->timeout
= s
->qdev
.io_timeout
* 1000;
2902 io_header
->usr_ptr
= r
;
2903 io_header
->flags
|= SG_FLAG_DIRECT_IO
;
2905 req
->cb_opaque
= opaque
;
2906 trace_scsi_disk_aio_sgio_command(r
->req
.tag
, req
->cdb
[0], lba
,
2907 nb_logical_blocks
, io_header
->timeout
);
2908 aiocb
= blk_aio_ioctl(s
->qdev
.conf
.blk
, SG_IO
, io_header
, scsi_block_sgio_complete
, req
);
2909 assert(aiocb
!= NULL
);
2913 static bool scsi_block_no_fua(SCSICommand
*cmd
)
2918 static BlockAIOCB
*scsi_block_dma_readv(int64_t offset
,
2920 BlockCompletionFunc
*cb
, void *cb_opaque
,
2923 SCSIBlockReq
*r
= opaque
;
2924 return scsi_block_do_sgio(r
, offset
, iov
,
2925 SG_DXFER_FROM_DEV
, cb
, cb_opaque
);
2928 static BlockAIOCB
*scsi_block_dma_writev(int64_t offset
,
2930 BlockCompletionFunc
*cb
, void *cb_opaque
,
2933 SCSIBlockReq
*r
= opaque
;
2934 return scsi_block_do_sgio(r
, offset
, iov
,
2935 SG_DXFER_TO_DEV
, cb
, cb_opaque
);
2938 static bool scsi_block_is_passthrough(SCSIDiskState
*s
, uint8_t *buf
)
2944 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2945 * for the number of logical blocks specified in the length
2946 * field). For other modes, do not use scatter/gather operation.
2948 if ((buf
[1] & 6) == 2) {
2961 case WRITE_VERIFY_10
:
2962 case WRITE_VERIFY_12
:
2963 case WRITE_VERIFY_16
:
2964 /* MMC writing cannot be done via DMA helpers, because it sometimes
2965 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2966 * We might use scsi_block_dma_reqops as long as no writing commands are
2967 * seen, but performance usually isn't paramount on optical media. So,
2968 * just make scsi-block operate the same as scsi-generic for them.
2970 if (s
->qdev
.type
!= TYPE_ROM
) {
2983 static int32_t scsi_block_dma_command(SCSIRequest
*req
, uint8_t *buf
)
2985 SCSIBlockReq
*r
= (SCSIBlockReq
*)req
;
2986 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
2988 r
->cmd
= req
->cmd
.buf
[0];
2989 switch (r
->cmd
>> 5) {
2992 r
->cdb1
= r
->group_number
= 0;
2996 r
->cdb1
= req
->cmd
.buf
[1];
2997 r
->group_number
= req
->cmd
.buf
[6];
3001 r
->cdb1
= req
->cmd
.buf
[1];
3002 r
->group_number
= req
->cmd
.buf
[10];
3006 r
->cdb1
= req
->cmd
.buf
[1];
3007 r
->group_number
= req
->cmd
.buf
[14];
3013 /* Protection information is not supported. For SCSI versions 2 and
3014 * older (as determined by snooping the guest's INQUIRY commands),
3015 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
3017 if (s
->qdev
.scsi_version
> 2 && (req
->cmd
.buf
[1] & 0xe0)) {
3018 scsi_check_condition(&r
->req
, SENSE_CODE(INVALID_FIELD
));
3022 return scsi_disk_dma_command(req
, buf
);
3025 static const SCSIReqOps scsi_block_dma_reqops
= {
3026 .size
= sizeof(SCSIBlockReq
),
3027 .free_req
= scsi_free_request
,
3028 .send_command
= scsi_block_dma_command
,
3029 .read_data
= scsi_read_data
,
3030 .write_data
= scsi_write_data
,
3031 .get_buf
= scsi_get_buf
,
3032 .load_request
= scsi_disk_load_request
,
3033 .save_request
= scsi_disk_save_request
,
3036 static SCSIRequest
*scsi_block_new_request(SCSIDevice
*d
, uint32_t tag
,
3037 uint32_t lun
, uint8_t *buf
,
3040 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, d
);
3042 if (scsi_block_is_passthrough(s
, buf
)) {
3043 return scsi_req_alloc(&scsi_generic_req_ops
, &s
->qdev
, tag
, lun
,
3046 return scsi_req_alloc(&scsi_block_dma_reqops
, &s
->qdev
, tag
, lun
,
3051 static int scsi_block_parse_cdb(SCSIDevice
*d
, SCSICommand
*cmd
,
3052 uint8_t *buf
, size_t buf_len
,
3055 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, d
);
3057 if (scsi_block_is_passthrough(s
, buf
)) {
3058 return scsi_bus_parse_cdb(&s
->qdev
, cmd
, buf
, buf_len
, hba_private
);
3060 return scsi_req_parse_cdb(&s
->qdev
, cmd
, buf
, buf_len
);
3064 static void scsi_block_update_sense(SCSIRequest
*req
)
3066 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
3067 SCSIBlockReq
*br
= DO_UPCAST(SCSIBlockReq
, req
, r
);
3068 r
->req
.sense_len
= MIN(br
->io_header
.sb_len_wr
, sizeof(r
->req
.sense
));
3073 BlockAIOCB
*scsi_dma_readv(int64_t offset
, QEMUIOVector
*iov
,
3074 BlockCompletionFunc
*cb
, void *cb_opaque
,
3077 SCSIDiskReq
*r
= opaque
;
3078 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
3079 return blk_aio_preadv(s
->qdev
.conf
.blk
, offset
, iov
, 0, cb
, cb_opaque
);
3083 BlockAIOCB
*scsi_dma_writev(int64_t offset
, QEMUIOVector
*iov
,
3084 BlockCompletionFunc
*cb
, void *cb_opaque
,
3087 SCSIDiskReq
*r
= opaque
;
3088 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
3089 return blk_aio_pwritev(s
->qdev
.conf
.blk
, offset
, iov
, 0, cb
, cb_opaque
);
3092 static void scsi_disk_base_class_initfn(ObjectClass
*klass
, void *data
)
3094 DeviceClass
*dc
= DEVICE_CLASS(klass
);
3095 SCSIDiskClass
*sdc
= SCSI_DISK_BASE_CLASS(klass
);
3097 dc
->fw_name
= "disk";
3098 dc
->reset
= scsi_disk_reset
;
3099 sdc
->dma_readv
= scsi_dma_readv
;
3100 sdc
->dma_writev
= scsi_dma_writev
;
3101 sdc
->need_fua_emulation
= scsi_is_cmd_fua
;
3104 static const TypeInfo scsi_disk_base_info
= {
3105 .name
= TYPE_SCSI_DISK_BASE
,
3106 .parent
= TYPE_SCSI_DEVICE
,
3107 .class_init
= scsi_disk_base_class_initfn
,
3108 .instance_size
= sizeof(SCSIDiskState
),
3109 .class_size
= sizeof(SCSIDiskClass
),
/* Properties common to all scsi-disk flavours: the backing drive plus the
 * identification strings reported through INQUIRY/VPD. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                   \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3124 static Property scsi_hd_properties
[] = {
3125 DEFINE_SCSI_DISK_PROPERTIES(),
3126 DEFINE_PROP_BIT("removable", SCSIDiskState
, features
,
3127 SCSI_DISK_F_REMOVABLE
, false),
3128 DEFINE_PROP_BIT("dpofua", SCSIDiskState
, features
,
3129 SCSI_DISK_F_DPOFUA
, false),
3130 DEFINE_PROP_UINT64("wwn", SCSIDiskState
, qdev
.wwn
, 0),
3131 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState
, qdev
.port_wwn
, 0),
3132 DEFINE_PROP_UINT16("port_index", SCSIDiskState
, port_index
, 0),
3133 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState
, max_unmap_size
,
3134 DEFAULT_MAX_UNMAP_SIZE
),
3135 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState
, max_io_size
,
3136 DEFAULT_MAX_IO_SIZE
),
3137 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState
, rotation_rate
, 0),
3138 DEFINE_PROP_INT32("scsi_version", SCSIDiskState
, qdev
.default_scsi_version
,
3140 DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState
,
3141 quirks
, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
,
3143 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState
, qdev
.conf
),
3144 DEFINE_PROP_END_OF_LIST(),
3147 static const VMStateDescription vmstate_scsi_disk_state
= {
3148 .name
= "scsi-disk",
3150 .minimum_version_id
= 1,
3151 .fields
= (VMStateField
[]) {
3152 VMSTATE_SCSI_DEVICE(qdev
, SCSIDiskState
),
3153 VMSTATE_BOOL(media_changed
, SCSIDiskState
),
3154 VMSTATE_BOOL(media_event
, SCSIDiskState
),
3155 VMSTATE_BOOL(eject_request
, SCSIDiskState
),
3156 VMSTATE_BOOL(tray_open
, SCSIDiskState
),
3157 VMSTATE_BOOL(tray_locked
, SCSIDiskState
),
3158 VMSTATE_END_OF_LIST()
3162 static void scsi_hd_class_initfn(ObjectClass
*klass
, void *data
)
3164 DeviceClass
*dc
= DEVICE_CLASS(klass
);
3165 SCSIDeviceClass
*sc
= SCSI_DEVICE_CLASS(klass
);
3167 sc
->realize
= scsi_hd_realize
;
3168 sc
->unrealize
= scsi_unrealize
;
3169 sc
->alloc_req
= scsi_new_request
;
3170 sc
->unit_attention_reported
= scsi_disk_unit_attention_reported
;
3171 dc
->desc
= "virtual SCSI disk";
3172 device_class_set_props(dc
, scsi_hd_properties
);
3173 dc
->vmsd
= &vmstate_scsi_disk_state
;
3176 static const TypeInfo scsi_hd_info
= {
3178 .parent
= TYPE_SCSI_DISK_BASE
,
3179 .class_init
= scsi_hd_class_initfn
,
3182 static Property scsi_cd_properties
[] = {
3183 DEFINE_SCSI_DISK_PROPERTIES(),
3184 DEFINE_PROP_UINT64("wwn", SCSIDiskState
, qdev
.wwn
, 0),
3185 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState
, qdev
.port_wwn
, 0),
3186 DEFINE_PROP_UINT16("port_index", SCSIDiskState
, port_index
, 0),
3187 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState
, max_io_size
,
3188 DEFAULT_MAX_IO_SIZE
),
3189 DEFINE_PROP_INT32("scsi_version", SCSIDiskState
, qdev
.default_scsi_version
,
3191 DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState
, quirks
,
3192 SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR
, 0),
3193 DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState
, quirks
,
3194 SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD
, 0),
3195 DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState
,
3196 quirks
, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
,
3198 DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState
, quirks
,
3199 SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED
, 0),
3200 DEFINE_PROP_END_OF_LIST(),
3203 static void scsi_cd_class_initfn(ObjectClass
*klass
, void *data
)
3205 DeviceClass
*dc
= DEVICE_CLASS(klass
);
3206 SCSIDeviceClass
*sc
= SCSI_DEVICE_CLASS(klass
);
3208 sc
->realize
= scsi_cd_realize
;
3209 sc
->alloc_req
= scsi_new_request
;
3210 sc
->unit_attention_reported
= scsi_disk_unit_attention_reported
;
3211 dc
->desc
= "virtual SCSI CD-ROM";
3212 device_class_set_props(dc
, scsi_cd_properties
);
3213 dc
->vmsd
= &vmstate_scsi_disk_state
;
3216 static const TypeInfo scsi_cd_info
= {
3218 .parent
= TYPE_SCSI_DISK_BASE
,
3219 .class_init
= scsi_cd_class_initfn
,
3223 static Property scsi_block_properties
[] = {
3224 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState
, qdev
.conf
),
3225 DEFINE_PROP_DRIVE("drive", SCSIDiskState
, qdev
.conf
.blk
),
3226 DEFINE_PROP_BOOL("share-rw", SCSIDiskState
, qdev
.conf
.share_rw
, false),
3227 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState
, rotation_rate
, 0),
3228 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState
, max_unmap_size
,
3229 DEFAULT_MAX_UNMAP_SIZE
),
3230 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState
, max_io_size
,
3231 DEFAULT_MAX_IO_SIZE
),
3232 DEFINE_PROP_INT32("scsi_version", SCSIDiskState
, qdev
.default_scsi_version
,
3234 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState
, qdev
.io_timeout
,
3235 DEFAULT_IO_TIMEOUT
),
3236 DEFINE_PROP_END_OF_LIST(),
3239 static void scsi_block_class_initfn(ObjectClass
*klass
, void *data
)
3241 DeviceClass
*dc
= DEVICE_CLASS(klass
);
3242 SCSIDeviceClass
*sc
= SCSI_DEVICE_CLASS(klass
);
3243 SCSIDiskClass
*sdc
= SCSI_DISK_BASE_CLASS(klass
);
3245 sc
->realize
= scsi_block_realize
;
3246 sc
->alloc_req
= scsi_block_new_request
;
3247 sc
->parse_cdb
= scsi_block_parse_cdb
;
3248 sdc
->dma_readv
= scsi_block_dma_readv
;
3249 sdc
->dma_writev
= scsi_block_dma_writev
;
3250 sdc
->update_sense
= scsi_block_update_sense
;
3251 sdc
->need_fua_emulation
= scsi_block_no_fua
;
3252 dc
->desc
= "SCSI block device passthrough";
3253 device_class_set_props(dc
, scsi_block_properties
);
3254 dc
->vmsd
= &vmstate_scsi_disk_state
;
3257 static const TypeInfo scsi_block_info
= {
3258 .name
= "scsi-block",
3259 .parent
= TYPE_SCSI_DISK_BASE
,
3260 .class_init
= scsi_block_class_initfn
,
3264 static void scsi_disk_register_types(void)
3266 type_register_static(&scsi_disk_base_info
);
3267 type_register_static(&scsi_hd_info
);
3268 type_register_static(&scsi_cd_info
);
3270 type_register_static(&scsi_block_info
);
3274 type_init(scsi_disk_register_types
)