hw/scsi/scsi-disk.c
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9  * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "hw/scsi/scsi.h"
29 #include "migration/qemu-file-types.h"
30 #include "migration/vmstate.h"
31 #include "hw/scsi/emulation.h"
32 #include "scsi/constants.h"
33 #include "sysemu/block-backend.h"
34 #include "sysemu/blockdev.h"
35 #include "hw/block/block.h"
36 #include "hw/qdev-properties.h"
37 #include "hw/qdev-properties-system.h"
38 #include "sysemu/dma.h"
39 #include "sysemu/sysemu.h"
40 #include "qemu/cutils.h"
41 #include "trace.h"
42 #include "qom/object.h"
43
44 #ifdef __linux__
45 #include <scsi/sg.h>
46 #endif
47
48 #define SCSI_WRITE_SAME_MAX (512 * KiB)
49 #define SCSI_DMA_BUF_SIZE (128 * KiB)
50 #define SCSI_MAX_INQUIRY_LEN 256
51 #define SCSI_MAX_MODE_LEN 256
52
53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
56
57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
58
59 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
60
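/*
 * Per-class hooks: how to issue vectored reads/writes to the backend,
 * whether a command needs FUA emulation, and how to refresh sense data
 * after a failed request.
 */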
61 struct SCSIDiskClass {
62 SCSIDeviceClass parent_class;
63 DMAIOFunc *dma_readv;
64 DMAIOFunc *dma_writev;
65 bool (*need_fua_emulation)(SCSICommand *cmd);
66 void (*update_sense)(SCSIRequest *r);
67 };
68
69 typedef struct SCSIDiskReq {
70 SCSIRequest req;
71 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
72 uint64_t sector;
73 uint32_t sector_count;
74 uint32_t buflen;
75 bool started;
76 bool need_fua_emulation;
77 struct iovec iov;
78 QEMUIOVector qiov;
79 BlockAcctCookie acct;
80 unsigned char *status;
81 } SCSIDiskReq;
82
83 #define SCSI_DISK_F_REMOVABLE 0
84 #define SCSI_DISK_F_DPOFUA 1
85 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
86
87 struct SCSIDiskState {
88 SCSIDevice qdev;
89 uint32_t features;
90 bool media_changed;
91 bool media_event;
92 bool eject_request;
93 uint16_t port_index;
94 uint64_t max_unmap_size;
95 uint64_t max_io_size;
96 QEMUBH *bh;
97 char *version;
98 char *serial;
99 char *vendor;
100 char *product;
101 char *device_id;
102 bool tray_open;
103 bool tray_locked;
104 /*
105 * 0x0000 - rotation rate not reported
106 * 0x0001 - non-rotating medium (SSD)
107 * 0x0002-0x0400 - reserved
108  * 0x0401-0xfffe - rotations per minute
109 * 0xffff - reserved
110 */
111 uint16_t rotation_rate;
112 };
113
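/* Free the bounce buffer attached to a completed request. */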
114 static void scsi_free_request(SCSIRequest *req)
115 {
116 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
117
118 qemu_vfree(r->iov.iov_base);
119 }
120
121 /* Helper function for command completion with sense. */
122 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
123 {
124 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
125 sense.ascq);
126 scsi_req_build_sense(&r->req, sense);
127 scsi_req_complete(&r->req, CHECK_CONDITION);
128 }
129
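/*
 * Allocate the bounce buffer on first use and set up the iovec for the
 * next chunk of the transfer, capped at the buffer size.
 */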
130 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
131 {
132 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
133
134 if (!r->iov.iov_base) {
135 r->buflen = size;
136 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
137 }
138 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
139 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
140 }
141
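/* Migration helpers: save and restore the state of an in-flight request. */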
142 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
143 {
144 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
145
146 qemu_put_be64s(f, &r->sector);
147 qemu_put_be32s(f, &r->sector_count);
148 qemu_put_be32s(f, &r->buflen);
149 if (r->buflen) {
150 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
151 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
152 } else if (!req->retry) {
153 uint32_t len = r->iov.iov_len;
154 qemu_put_be32s(f, &len);
155 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
156 }
157 }
158 }
159
160 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
161 {
162 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
163
164 qemu_get_be64s(f, &r->sector);
165 qemu_get_be32s(f, &r->sector_count);
166 qemu_get_be32s(f, &r->buflen);
167 if (r->buflen) {
168 scsi_init_iovec(r, r->buflen);
169 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
170 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
171 } else if (!r->req.retry) {
172 uint32_t len;
173 qemu_get_be32s(f, &len);
174 r->iov.iov_len = len;
175 assert(r->iov.iov_len <= r->buflen);
176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
177 }
178 }
179
180 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
181 }
182
183 /*
184 * scsi_handle_rw_error has two return values. False means that the error
185 * must be ignored, true means that the error has been processed and the
186 * caller should not do anything else for this request. Note that
187 * scsi_handle_rw_error always manages its reference counts, independent
188 * of the return value.
189 */
190 static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
191 {
192 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
194 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
195 SCSISense sense = SENSE_CODE(NO_SENSE);
196 int error = 0;
197 bool req_has_sense = false;
198 BlockErrorAction action;
199 int status;
200
201 if (ret < 0) {
202 status = scsi_sense_from_errno(-ret, &sense);
203 error = -ret;
204 } else {
205 /* A passthrough command has completed with nonzero status. */
206 status = ret;
207 if (status == CHECK_CONDITION) {
208 req_has_sense = true;
209 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
210 } else {
211 error = EINVAL;
212 }
213 }
214
215 /*
216 * Check whether the error has to be handled by the guest or should
217 * rather follow the rerror=/werror= settings. Guest-handled errors
218 * are usually retried immediately, so do not post them to QMP and
219 * do not account them as failed I/O.
220 */
221 if (req_has_sense &&
222 scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
223 action = BLOCK_ERROR_ACTION_REPORT;
224 acct_failed = false;
225 } else {
226 action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
227 blk_error_action(s->qdev.conf.blk, action, is_read, error);
228 }
229
230 switch (action) {
231 case BLOCK_ERROR_ACTION_REPORT:
232 if (acct_failed) {
233 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
234 }
235 if (req_has_sense) {
236 sdc->update_sense(&r->req);
237 } else if (status == CHECK_CONDITION) {
238 scsi_req_build_sense(&r->req, sense);
239 }
240 scsi_req_complete(&r->req, status);
241 return true;
242
243 case BLOCK_ERROR_ACTION_IGNORE:
244 return false;
245
246 case BLOCK_ERROR_ACTION_STOP:
247 scsi_req_retry(&r->req);
248 return true;
249
250 default:
251 g_assert_not_reached();
252 }
253 }
254
255 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
256 {
257 if (r->req.io_canceled) {
258 scsi_req_cancel_complete(&r->req);
259 return true;
260 }
261
262 if (ret < 0) {
263 return scsi_handle_rw_error(r, ret, acct_failed);
264 } else if (r->status && *r->status) {
265 return scsi_handle_rw_error(r, *r->status, acct_failed);
266 }
267
268 return false;
269 }
270
271 static void scsi_aio_complete(void *opaque, int ret)
272 {
273 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
274 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
275
276 assert(r->req.aiocb != NULL);
277 r->req.aiocb = NULL;
278 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
279 if (scsi_disk_req_check_error(r, ret, true)) {
280 goto done;
281 }
282
283 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
284 scsi_req_complete(&r->req, GOOD);
285
286 done:
287 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
288 scsi_req_unref(&r->req);
289 }
290
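/*
 * Whether the CDB requests Force Unit Access: the FUA bit for
 * READ/WRITE(10/12/16), always true for VERIFY and WRITE AND VERIFY.
 */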
291 static bool scsi_is_cmd_fua(SCSICommand *cmd)
292 {
293 switch (cmd->buf[0]) {
294 case READ_10:
295 case READ_12:
296 case READ_16:
297 case WRITE_10:
298 case WRITE_12:
299 case WRITE_16:
300 return (cmd->buf[1] & 8) != 0;
301
302 case VERIFY_10:
303 case VERIFY_12:
304 case VERIFY_16:
305 case WRITE_VERIFY_10:
306 case WRITE_VERIFY_12:
307 case WRITE_VERIFY_16:
308 return true;
309
310 case READ_6:
311 case WRITE_6:
312 default:
313 return false;
314 }
315 }
316
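/*
 * Complete a write request, first flushing the backend if FUA semantics
 * have to be emulated with an explicit flush.
 */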
317 static void scsi_write_do_fua(SCSIDiskReq *r)
318 {
319 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
320
321 assert(r->req.aiocb == NULL);
322 assert(!r->req.io_canceled);
323
324 if (r->need_fua_emulation) {
325 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
326 BLOCK_ACCT_FLUSH);
327 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
328 return;
329 }
330
331 scsi_req_complete(&r->req, GOOD);
332 scsi_req_unref(&r->req);
333 }
334
335 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
336 {
337 assert(r->req.aiocb == NULL);
338 if (scsi_disk_req_check_error(r, ret, false)) {
339 goto done;
340 }
341
342 r->sector += r->sector_count;
343 r->sector_count = 0;
344 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
345 scsi_write_do_fua(r);
346 return;
347 } else {
348 scsi_req_complete(&r->req, GOOD);
349 }
350
351 done:
352 scsi_req_unref(&r->req);
353 }
354
355 static void scsi_dma_complete(void *opaque, int ret)
356 {
357 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
358 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
359
360 assert(r->req.aiocb != NULL);
361 r->req.aiocb = NULL;
362
363 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
364 if (ret < 0) {
365 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
366 } else {
367 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
368 }
369 scsi_dma_complete_noio(r, ret);
370 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
371 }
372
373 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
374 {
375 uint32_t n;
376
377 assert(r->req.aiocb == NULL);
378 if (scsi_disk_req_check_error(r, ret, false)) {
379 goto done;
380 }
381
382 n = r->qiov.size / BDRV_SECTOR_SIZE;
383 r->sector += n;
384 r->sector_count -= n;
385 scsi_req_data(&r->req, r->qiov.size);
386
387 done:
388 scsi_req_unref(&r->req);
389 }
390
391 static void scsi_read_complete(void *opaque, int ret)
392 {
393 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
394 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
395
396 assert(r->req.aiocb != NULL);
397 r->req.aiocb = NULL;
398
399 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
400 if (ret < 0) {
401 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
402 } else {
403 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
404 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
405 }
406 scsi_read_complete_noio(r, ret);
407 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
408 }
409
410 /* Actually issue a read to the block device. */
411 static void scsi_do_read(SCSIDiskReq *r, int ret)
412 {
413 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
414 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
415
416     assert(r->req.aiocb == NULL);
417 if (scsi_disk_req_check_error(r, ret, false)) {
418 goto done;
419 }
420
421 /* The request is used as the AIO opaque value, so add a ref. */
422 scsi_req_ref(&r->req);
423
424 if (r->req.sg) {
425 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
426 r->req.resid -= r->req.sg->size;
427 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
428 r->req.sg, r->sector << BDRV_SECTOR_BITS,
429 BDRV_SECTOR_SIZE,
430 sdc->dma_readv, r, scsi_dma_complete, r,
431 DMA_DIRECTION_FROM_DEVICE);
432 } else {
433 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
434 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
435 r->qiov.size, BLOCK_ACCT_READ);
436 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
437 scsi_read_complete, r, r);
438 }
439
440 done:
441 scsi_req_unref(&r->req);
442 }
443
444 static void scsi_do_read_cb(void *opaque, int ret)
445 {
446 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
447 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
448
449     assert(r->req.aiocb != NULL);
450 r->req.aiocb = NULL;
451
452 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
453 if (ret < 0) {
454 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
455 } else {
456 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
457 }
458 scsi_do_read(opaque, ret);
459 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
460 }
461
462 /* Read more data from scsi device into buffer. */
463 static void scsi_read_data(SCSIRequest *req)
464 {
465 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
466 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
467 bool first;
468
469 trace_scsi_disk_read_data_count(r->sector_count);
470 if (r->sector_count == 0) {
471 /* This also clears the sense buffer for REQUEST SENSE. */
472 scsi_req_complete(&r->req, GOOD);
473 return;
474 }
475
476     /* A data transfer must not already be in progress */
477 assert(r->req.aiocb == NULL);
478
479 /* The request is used as the AIO opaque value, so add a ref. */
480 scsi_req_ref(&r->req);
481 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
482 trace_scsi_disk_read_data_invalid();
483 scsi_read_complete_noio(r, -EINVAL);
484 return;
485 }
486
487 if (!blk_is_available(req->dev->conf.blk)) {
488 scsi_read_complete_noio(r, -ENOMEDIUM);
489 return;
490 }
491
492 first = !r->started;
493 r->started = true;
494 if (first && r->need_fua_emulation) {
495 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
496 BLOCK_ACCT_FLUSH);
497 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
498 } else {
499 scsi_do_read(r, 0);
500 }
501 }
502
503 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
504 {
505 uint32_t n;
506
507     assert(r->req.aiocb == NULL);
508 if (scsi_disk_req_check_error(r, ret, false)) {
509 goto done;
510 }
511
512 n = r->qiov.size / BDRV_SECTOR_SIZE;
513 r->sector += n;
514 r->sector_count -= n;
515 if (r->sector_count == 0) {
516 scsi_write_do_fua(r);
517 return;
518 } else {
519 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
520 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
521 scsi_req_data(&r->req, r->qiov.size);
522 }
523
524 done:
525 scsi_req_unref(&r->req);
526 }
527
528 static void scsi_write_complete(void *opaque, int ret)
529 {
530 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
531 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
532
533     assert(r->req.aiocb != NULL);
534 r->req.aiocb = NULL;
535
536 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
537 if (ret < 0) {
538 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
539 } else {
540 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
541 }
542 scsi_write_complete_noio(r, ret);
543 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
544 }
545
546 static void scsi_write_data(SCSIRequest *req)
547 {
548 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
549 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
550 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
551
552     /* A data transfer must not already be in progress */
553 assert(r->req.aiocb == NULL);
554
555 /* The request is used as the AIO opaque value, so add a ref. */
556 scsi_req_ref(&r->req);
557 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
558 trace_scsi_disk_write_data_invalid();
559 scsi_write_complete_noio(r, -EINVAL);
560 return;
561 }
562
563 if (!r->req.sg && !r->qiov.size) {
564 /* Called for the first time. Ask the driver to send us more data. */
565 r->started = true;
566 scsi_write_complete_noio(r, 0);
567 return;
568 }
569 if (!blk_is_available(req->dev->conf.blk)) {
570 scsi_write_complete_noio(r, -ENOMEDIUM);
571 return;
572 }
573
574 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
575 r->req.cmd.buf[0] == VERIFY_16) {
576 if (r->req.sg) {
577 scsi_dma_complete_noio(r, 0);
578 } else {
579 scsi_write_complete_noio(r, 0);
580 }
581 return;
582 }
583
584 if (r->req.sg) {
585 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
586 r->req.resid -= r->req.sg->size;
587 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
588 r->req.sg, r->sector << BDRV_SECTOR_BITS,
589 BDRV_SECTOR_SIZE,
590 sdc->dma_writev, r, scsi_dma_complete, r,
591 DMA_DIRECTION_TO_DEVICE);
592 } else {
593 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
594 r->qiov.size, BLOCK_ACCT_WRITE);
595 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
596 scsi_write_complete, r, r);
597 }
598 }
599
600 /* Return a pointer to the data buffer. */
601 static uint8_t *scsi_get_buf(SCSIRequest *req)
602 {
603 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
604
605 return (uint8_t *)r->iov.iov_base;
606 }
607
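/*
 * Build the response for INQUIRY with the EVPD bit set (Vital Product
 * Data). Returns the response length, or -1 for an unsupported page.
 */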
608 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
609 {
610 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
611 uint8_t page_code = req->cmd.buf[2];
612 int start, buflen = 0;
613
614 outbuf[buflen++] = s->qdev.type & 0x1f;
615 outbuf[buflen++] = page_code;
616 outbuf[buflen++] = 0x00;
617 outbuf[buflen++] = 0x00;
618 start = buflen;
619
620 switch (page_code) {
621 case 0x00: /* Supported page codes, mandatory */
622 {
623 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
624 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
625 if (s->serial) {
626 outbuf[buflen++] = 0x80; /* unit serial number */
627 }
628 outbuf[buflen++] = 0x83; /* device identification */
629 if (s->qdev.type == TYPE_DISK) {
630 outbuf[buflen++] = 0xb0; /* block limits */
631 outbuf[buflen++] = 0xb1; /* block device characteristics */
632 outbuf[buflen++] = 0xb2; /* thin provisioning */
633 }
634 break;
635 }
636 case 0x80: /* Device serial number, optional */
637 {
638 int l;
639
640 if (!s->serial) {
641 trace_scsi_disk_emulate_vpd_page_80_not_supported();
642 return -1;
643 }
644
645 l = strlen(s->serial);
646 if (l > 36) {
647 l = 36;
648 }
649
650 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
651 memcpy(outbuf + buflen, s->serial, l);
652 buflen += l;
653 break;
654 }
655
656 case 0x83: /* Device identification page, mandatory */
657 {
658 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
659
660 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
661
662 if (id_len) {
663 outbuf[buflen++] = 0x2; /* ASCII */
664 outbuf[buflen++] = 0; /* not officially assigned */
665 outbuf[buflen++] = 0; /* reserved */
666 outbuf[buflen++] = id_len; /* length of data following */
667 memcpy(outbuf + buflen, s->device_id, id_len);
668 buflen += id_len;
669 }
670
671 if (s->qdev.wwn) {
672 outbuf[buflen++] = 0x1; /* Binary */
673 outbuf[buflen++] = 0x3; /* NAA */
674 outbuf[buflen++] = 0; /* reserved */
675 outbuf[buflen++] = 8;
676 stq_be_p(&outbuf[buflen], s->qdev.wwn);
677 buflen += 8;
678 }
679
680 if (s->qdev.port_wwn) {
681 outbuf[buflen++] = 0x61; /* SAS / Binary */
682 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
683 outbuf[buflen++] = 0; /* reserved */
684 outbuf[buflen++] = 8;
685 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
686 buflen += 8;
687 }
688
689 if (s->port_index) {
690 outbuf[buflen++] = 0x61; /* SAS / Binary */
691
692 /* PIV/Target port/relative target port */
693 outbuf[buflen++] = 0x94;
694
695 outbuf[buflen++] = 0; /* reserved */
696 outbuf[buflen++] = 4;
697 stw_be_p(&outbuf[buflen + 2], s->port_index);
698 buflen += 4;
699 }
700 break;
701 }
702 case 0xb0: /* block limits */
703 {
704 SCSIBlockLimits bl = {};
705
706 if (s->qdev.type == TYPE_ROM) {
707 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
708 return -1;
709 }
710 bl.wsnz = 1;
711 bl.unmap_sectors =
712 s->qdev.conf.discard_granularity / s->qdev.blocksize;
713 bl.min_io_size =
714 s->qdev.conf.min_io_size / s->qdev.blocksize;
715 bl.opt_io_size =
716 s->qdev.conf.opt_io_size / s->qdev.blocksize;
717 bl.max_unmap_sectors =
718 s->max_unmap_size / s->qdev.blocksize;
719 bl.max_io_sectors =
720 s->max_io_size / s->qdev.blocksize;
721 /* 255 descriptors fit in 4 KiB with an 8-byte header */
722 bl.max_unmap_descr = 255;
723
724 if (s->qdev.type == TYPE_DISK) {
725 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
726 int max_io_sectors_blk =
727 max_transfer_blk / s->qdev.blocksize;
728
729 bl.max_io_sectors =
730 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
731 }
732 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
733 break;
734 }
735 case 0xb1: /* block device characteristics */
736 {
737 buflen = 0x40;
738 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
739 outbuf[5] = s->rotation_rate & 0xff;
740 outbuf[6] = 0; /* PRODUCT TYPE */
741 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
742 outbuf[8] = 0; /* VBULS */
743 break;
744 }
745 case 0xb2: /* thin provisioning */
746 {
747 buflen = 8;
748 outbuf[4] = 0;
749 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
750 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
751 outbuf[7] = 0;
752 break;
753 }
754 default:
755 return -1;
756 }
757 /* done with EVPD */
758 assert(buflen - start <= 255);
759 outbuf[start - 1] = buflen - start;
760 return buflen;
761 }
762
763 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
764 {
765 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
766 int buflen = 0;
767
768 if (req->cmd.buf[1] & 0x1) {
769 /* Vital product data */
770 return scsi_disk_emulate_vpd_page(req, outbuf);
771 }
772
773 /* Standard INQUIRY data */
774 if (req->cmd.buf[2] != 0) {
775 return -1;
776 }
777
778 /* PAGE CODE == 0 */
779 buflen = req->cmd.xfer;
780 if (buflen > SCSI_MAX_INQUIRY_LEN) {
781 buflen = SCSI_MAX_INQUIRY_LEN;
782 }
783
784 outbuf[0] = s->qdev.type & 0x1f;
785 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
786
787 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
788 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
789
790 memset(&outbuf[32], 0, 4);
791 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
792 /*
793 * We claim conformance to SPC-3, which is required for guests
794 * to ask for modern features like READ CAPACITY(16) or the
795 * block characteristics VPD page by default. Not all of SPC-3
796 * is actually implemented, but we're good enough.
797 */
798 outbuf[2] = s->qdev.default_scsi_version;
799 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
800
801 if (buflen > 36) {
802 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
803 } else {
804     /* If the allocation length in the CDB is too small,
805        the ADDITIONAL LENGTH field is not adjusted */
806 outbuf[4] = 36 - 5;
807 }
808
809 /* Sync data transfer and TCQ. */
810 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
811 return buflen;
812 }
813
814 static inline bool media_is_dvd(SCSIDiskState *s)
815 {
816 uint64_t nb_sectors;
817 if (s->qdev.type != TYPE_ROM) {
818 return false;
819 }
820 if (!blk_is_available(s->qdev.conf.blk)) {
821 return false;
822 }
823 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
824 return nb_sectors > CD_MAX_SECTORS;
825 }
826
827 static inline bool media_is_cd(SCSIDiskState *s)
828 {
829 uint64_t nb_sectors;
830 if (s->qdev.type != TYPE_ROM) {
831 return false;
832 }
833 if (!blk_is_available(s->qdev.conf.blk)) {
834 return false;
835 }
836 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
837 return nb_sectors <= CD_MAX_SECTORS;
838 }
839
840 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
841 uint8_t *outbuf)
842 {
843 uint8_t type = r->req.cmd.buf[1] & 7;
844
845 if (s->qdev.type != TYPE_ROM) {
846 return -1;
847 }
848
849 /* Types 1/2 are only defined for Blu-Ray. */
850 if (type != 0) {
851 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
852 return -1;
853 }
854
855 memset(outbuf, 0, 34);
856 outbuf[1] = 32;
857 outbuf[2] = 0xe; /* last session complete, disc finalized */
858 outbuf[3] = 1; /* first track on disc */
859 outbuf[4] = 1; /* # of sessions */
860 outbuf[5] = 1; /* first track of last session */
861 outbuf[6] = 1; /* last track of last session */
862 outbuf[7] = 0x20; /* unrestricted use */
863 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
864     /* 9-10-11: most significant bytes corresponding to bytes 4-5-6 */
865 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
866 /* 24-31: disc bar code */
867 /* 32: disc application code */
868 /* 33: number of OPC tables */
869
870 return 34;
871 }
872
873 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
874 uint8_t *outbuf)
875 {
876 static const int rds_caps_size[5] = {
877 [0] = 2048 + 4,
878 [1] = 4 + 4,
879 [3] = 188 + 4,
880 [4] = 2048 + 4,
881 };
882
883 uint8_t media = r->req.cmd.buf[1];
884 uint8_t layer = r->req.cmd.buf[6];
885 uint8_t format = r->req.cmd.buf[7];
886 int size = -1;
887
888 if (s->qdev.type != TYPE_ROM) {
889 return -1;
890 }
891 if (media != 0) {
892 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
893 return -1;
894 }
895
896 if (format != 0xff) {
897 if (!blk_is_available(s->qdev.conf.blk)) {
898 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
899 return -1;
900 }
901 if (media_is_cd(s)) {
902 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
903 return -1;
904 }
905 if (format >= ARRAY_SIZE(rds_caps_size)) {
906 return -1;
907 }
908 size = rds_caps_size[format];
909 memset(outbuf, 0, size);
910 }
911
912 switch (format) {
913 case 0x00: {
914 /* Physical format information */
915 uint64_t nb_sectors;
916 if (layer != 0) {
917 goto fail;
918 }
919 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
920
921 outbuf[4] = 1; /* DVD-ROM, part version 1 */
922 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
923 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
924 outbuf[7] = 0; /* default densities */
925
926 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
927 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
928 break;
929 }
930
931 case 0x01: /* DVD copyright information, all zeros */
932 break;
933
934 case 0x03: /* BCA information - invalid field for no BCA info */
935 return -1;
936
937 case 0x04: /* DVD disc manufacturing information, all zeros */
938 break;
939
940 case 0xff: { /* List capabilities */
941 int i;
942 size = 4;
943 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
944 if (!rds_caps_size[i]) {
945 continue;
946 }
947 outbuf[size] = i;
948 outbuf[size + 1] = 0x40; /* Not writable, readable */
949 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
950 size += 4;
951 }
952 break;
953 }
954
955 default:
956 return -1;
957 }
958
959 /* Size of buffer, not including 2 byte size field */
960 stw_be_p(outbuf, size - 2);
961 return size;
962
963 fail:
964 return -1;
965 }
966
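/* Fill in the media event descriptor for GET EVENT STATUS NOTIFICATION. */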
967 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
968 {
969 uint8_t event_code, media_status;
970
971 media_status = 0;
972 if (s->tray_open) {
973 media_status = MS_TRAY_OPEN;
974 } else if (blk_is_inserted(s->qdev.conf.blk)) {
975 media_status = MS_MEDIA_PRESENT;
976 }
977
978 /* Event notification descriptor */
979 event_code = MEC_NO_CHANGE;
980 if (media_status != MS_TRAY_OPEN) {
981 if (s->media_event) {
982 event_code = MEC_NEW_MEDIA;
983 s->media_event = false;
984 } else if (s->eject_request) {
985 event_code = MEC_EJECT_REQUESTED;
986 s->eject_request = false;
987 }
988 }
989
990 outbuf[0] = event_code;
991 outbuf[1] = media_status;
992
993 /* These fields are reserved, just clear them. */
994 outbuf[2] = 0;
995 outbuf[3] = 0;
996 return 4;
997 }
998
999 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
1000 uint8_t *outbuf)
1001 {
1002 int size;
1003 uint8_t *buf = r->req.cmd.buf;
1004 uint8_t notification_class_request = buf[4];
1005 if (s->qdev.type != TYPE_ROM) {
1006 return -1;
1007 }
1008 if ((buf[1] & 1) == 0) {
1009 /* asynchronous */
1010 return -1;
1011 }
1012
1013 size = 4;
1014 outbuf[0] = outbuf[1] = 0;
1015 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1016 if (notification_class_request & (1 << GESN_MEDIA)) {
1017 outbuf[2] = GESN_MEDIA;
1018 size += scsi_event_status_media(s, &outbuf[size]);
1019 } else {
1020 outbuf[2] = 0x80;
1021 }
1022 stw_be_p(outbuf, size - 4);
1023 return size;
1024 }
1025
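/*
 * GET CONFIGURATION: report the current MMC profile (CD-ROM, DVD-ROM or
 * none) together with the profile list, core and removable medium features.
 */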
1026 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1027 {
1028 int current;
1029
1030 if (s->qdev.type != TYPE_ROM) {
1031 return -1;
1032 }
1033
1034 if (media_is_dvd(s)) {
1035 current = MMC_PROFILE_DVD_ROM;
1036 } else if (media_is_cd(s)) {
1037 current = MMC_PROFILE_CD_ROM;
1038 } else {
1039 current = MMC_PROFILE_NONE;
1040 }
1041
1042 memset(outbuf, 0, 40);
1043 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1044 stw_be_p(&outbuf[6], current);
1045 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1046 outbuf[10] = 0x03; /* persistent, current */
1047 outbuf[11] = 8; /* two profiles */
1048 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1049 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1050 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1051 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1052 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1053 stw_be_p(&outbuf[20], 1);
1054 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1055 outbuf[23] = 8;
1056 stl_be_p(&outbuf[24], 1); /* SCSI */
1057 outbuf[28] = 1; /* DBE = 1, mandatory */
1058 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1059 stw_be_p(&outbuf[32], 3);
1060 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1061 outbuf[35] = 4;
1062 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1063 /* TODO: Random readable, CD read, DVD read, drive serial number,
1064 power management */
1065 return 40;
1066 }
1067
1068 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1069 {
1070 if (s->qdev.type != TYPE_ROM) {
1071 return -1;
1072 }
1073 memset(outbuf, 0, 8);
1074 outbuf[5] = 1; /* CD-ROM */
1075 return 8;
1076 }
1077
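/*
 * Emit one mode page at *p_outbuf and advance the pointer past it.
 * Returns the number of bytes written, or -1 if the page does not apply
 * to this device type.
 */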
1078 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1079 int page_control)
1080 {
1081 static const int mode_sense_valid[0x3f] = {
1082 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1083 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1084 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1085 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1086 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1087 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1088 };
1089
1090 uint8_t *p = *p_outbuf + 2;
1091 int length;
1092
1093 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1094 return -1;
1095 }
1096
1097 /*
1098 * If Changeable Values are requested, a mask denoting those mode parameters
1099 * that are changeable shall be returned. As we currently don't support
1100  * parameter changes via MODE_SELECT, all bits are returned set to zero.
1101  * The buffer was already memset to zero by the caller of this function.
1102 *
1103 * The offsets here are off by two compared to the descriptions in the
1104 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1105 * but it is done so that offsets are consistent within our implementation
1106 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1107 * 2-byte and 4-byte headers.
1108 */
1109 switch (page) {
1110 case MODE_PAGE_HD_GEOMETRY:
1111 length = 0x16;
1112 if (page_control == 1) { /* Changeable Values */
1113 break;
1114 }
1115 /* if a geometry hint is available, use it */
1116 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1117 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1118 p[2] = s->qdev.conf.cyls & 0xff;
1119 p[3] = s->qdev.conf.heads & 0xff;
1120 /* Write precomp start cylinder, disabled */
1121 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1122 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1123 p[6] = s->qdev.conf.cyls & 0xff;
1124 /* Reduced current start cylinder, disabled */
1125 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1126 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1127 p[9] = s->qdev.conf.cyls & 0xff;
1128 /* Device step rate [ns], 200ns */
1129 p[10] = 0;
1130 p[11] = 200;
1131 /* Landing zone cylinder */
1132 p[12] = 0xff;
1133 p[13] = 0xff;
1134 p[14] = 0xff;
1135 /* Medium rotation rate [rpm], 5400 rpm */
1136 p[18] = (5400 >> 8) & 0xff;
1137 p[19] = 5400 & 0xff;
1138 break;
1139
1140 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1141 length = 0x1e;
1142 if (page_control == 1) { /* Changeable Values */
1143 break;
1144 }
1145 /* Transfer rate [kbit/s], 5Mbit/s */
1146 p[0] = 5000 >> 8;
1147 p[1] = 5000 & 0xff;
1148 /* if a geometry hint is available, use it */
1149 p[2] = s->qdev.conf.heads & 0xff;
1150 p[3] = s->qdev.conf.secs & 0xff;
1151 p[4] = s->qdev.blocksize >> 8;
1152 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1153 p[7] = s->qdev.conf.cyls & 0xff;
1154 /* Write precomp start cylinder, disabled */
1155 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1156 p[9] = s->qdev.conf.cyls & 0xff;
1157 /* Reduced current start cylinder, disabled */
1158 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1159 p[11] = s->qdev.conf.cyls & 0xff;
1160 /* Device step rate [100us], 100us */
1161 p[12] = 0;
1162 p[13] = 1;
1163 /* Device step pulse width [us], 1us */
1164 p[14] = 1;
1165 /* Device head settle delay [100us], 100us */
1166 p[15] = 0;
1167 p[16] = 1;
1168 /* Motor on delay [0.1s], 0.1s */
1169 p[17] = 1;
1170 /* Motor off delay [0.1s], 0.1s */
1171 p[18] = 1;
1172 /* Medium rotation rate [rpm], 5400 rpm */
1173 p[26] = (5400 >> 8) & 0xff;
1174 p[27] = 5400 & 0xff;
1175 break;
1176
1177 case MODE_PAGE_CACHING:
1178 length = 0x12;
1179 if (page_control == 1 || /* Changeable Values */
1180 blk_enable_write_cache(s->qdev.conf.blk)) {
1181 p[0] = 4; /* WCE */
1182 }
1183 break;
1184
1185 case MODE_PAGE_R_W_ERROR:
1186 length = 10;
1187 if (page_control == 1) { /* Changeable Values */
1188 break;
1189 }
1190 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1191 if (s->qdev.type == TYPE_ROM) {
1192 p[1] = 0x20; /* Read Retry Count */
1193 }
1194 break;
1195
1196 case MODE_PAGE_AUDIO_CTL:
1197 length = 14;
1198 break;
1199
1200 case MODE_PAGE_CAPABILITIES:
1201 length = 0x14;
1202 if (page_control == 1) { /* Changeable Values */
1203 break;
1204 }
1205
1206 p[0] = 0x3b; /* CD-R & CD-RW read */
1207 p[1] = 0; /* Writing not supported */
1208 p[2] = 0x7f; /* Audio, composite, digital out,
1209 mode 2 form 1&2, multi session */
1210 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1211 RW corrected, C2 errors, ISRC,
1212 UPC, Bar code */
1213 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1214 /* Locking supported, jumper present, eject, tray */
1215 p[5] = 0; /* no volume & mute control, no
1216 changer */
1217 p[6] = (50 * 176) >> 8; /* 50x read speed */
1218 p[7] = (50 * 176) & 0xff;
1219 p[8] = 2 >> 8; /* Two volume levels */
1220 p[9] = 2 & 0xff;
1221 p[10] = 2048 >> 8; /* 2M buffer */
1222 p[11] = 2048 & 0xff;
1223 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1224 p[13] = (16 * 176) & 0xff;
1225 p[16] = (16 * 176) >> 8; /* 16x write speed */
1226 p[17] = (16 * 176) & 0xff;
1227 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1228 p[19] = (16 * 176) & 0xff;
1229 break;
1230
1231 default:
1232 return -1;
1233 }
1234
1235 assert(length < 256);
1236 (*p_outbuf)[0] = page;
1237 (*p_outbuf)[1] = length;
1238 *p_outbuf += length + 2;
1239 return length + 2;
1240 }
1241
1242 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1243 {
1244 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1245 uint64_t nb_sectors;
1246 bool dbd;
1247 int page, buflen, ret, page_control;
1248 uint8_t *p;
1249 uint8_t dev_specific_param;
1250
1251 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1252 page = r->req.cmd.buf[2] & 0x3f;
1253 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1254
1255 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1256 10, page, r->req.cmd.xfer, page_control);
1257 memset(outbuf, 0, r->req.cmd.xfer);
1258 p = outbuf;
1259
1260 if (s->qdev.type == TYPE_DISK) {
1261 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1262 if (!blk_is_writable(s->qdev.conf.blk)) {
1263 dev_specific_param |= 0x80; /* Readonly. */
1264 }
1265 } else {
1266 /* MMC prescribes that CD/DVD drives have no block descriptors,
1267 * and defines no device-specific parameter. */
1268 dev_specific_param = 0x00;
1269 dbd = true;
1270 }
1271
1272 if (r->req.cmd.buf[0] == MODE_SENSE) {
1273 p[1] = 0; /* Default media type. */
1274 p[2] = dev_specific_param;
1275 p[3] = 0; /* Block descriptor length. */
1276 p += 4;
1277 } else { /* MODE_SENSE_10 */
1278 p[2] = 0; /* Default media type. */
1279 p[3] = dev_specific_param;
1280 p[6] = p[7] = 0; /* Block descriptor length. */
1281 p += 8;
1282 }
1283
1284 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1285 if (!dbd && nb_sectors) {
1286 if (r->req.cmd.buf[0] == MODE_SENSE) {
1287 outbuf[3] = 8; /* Block descriptor length */
1288 } else { /* MODE_SENSE_10 */
1289 outbuf[7] = 8; /* Block descriptor length */
1290 }
1291 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1292 if (nb_sectors > 0xffffff) {
1293 nb_sectors = 0;
1294 }
1295 p[0] = 0; /* media density code */
1296 p[1] = (nb_sectors >> 16) & 0xff;
1297 p[2] = (nb_sectors >> 8) & 0xff;
1298 p[3] = nb_sectors & 0xff;
1299 p[4] = 0; /* reserved */
1300 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1301 p[6] = s->qdev.blocksize >> 8;
1302 p[7] = 0;
1303 p += 8;
1304 }
1305
1306 if (page_control == 3) {
1307 /* Saved Values */
1308 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1309 return -1;
1310 }
1311
1312 if (page == 0x3f) {
1313 for (page = 0; page <= 0x3e; page++) {
1314 mode_sense_page(s, page, &p, page_control);
1315 }
1316 } else {
1317 ret = mode_sense_page(s, page, &p, page_control);
1318 if (ret == -1) {
1319 return -1;
1320 }
1321 }
1322
1323 buflen = p - outbuf;
1324 /*
1325 * The mode data length field specifies the length in bytes of the
1326 * following data that is available to be transferred. The mode data
1327 * length does not include itself.
1328 */
1329 if (r->req.cmd.buf[0] == MODE_SENSE) {
1330 outbuf[0] = buflen - 1;
1331 } else { /* MODE_SENSE_10 */
1332 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1333 outbuf[1] = (buflen - 2) & 0xff;
1334 }
1335 return buflen;
1336 }
1337
1338 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1339 {
1340 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1341 int start_track, format, msf, toclen;
1342 uint64_t nb_sectors;
1343
1344 msf = req->cmd.buf[1] & 2;
1345 format = req->cmd.buf[2] & 0xf;
1346 start_track = req->cmd.buf[6];
1347 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1348 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1349 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1350 switch (format) {
1351 case 0:
1352 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1353 break;
1354 case 1:
1355 /* multi session : only a single session defined */
1356 toclen = 12;
1357 memset(outbuf, 0, 12);
1358 outbuf[1] = 0x0a;
1359 outbuf[2] = 0x01;
1360 outbuf[3] = 0x01;
1361 break;
1362 case 2:
1363 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1364 break;
1365 default:
1366 return -1;
1367 }
1368 return toclen;
1369 }
1370
1371 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1372 {
1373 SCSIRequest *req = &r->req;
1374 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1375 bool start = req->cmd.buf[4] & 1;
1376 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1377 int pwrcnd = req->cmd.buf[4] & 0xf0;
1378
1379 if (pwrcnd) {
1380 /* eject/load only happens for power condition == 0 */
1381 return 0;
1382 }
1383
1384 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1385 if (!start && !s->tray_open && s->tray_locked) {
1386 scsi_check_condition(r,
1387 blk_is_inserted(s->qdev.conf.blk)
1388 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1389 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1390 return -1;
1391 }
1392
1393 if (s->tray_open != !start) {
1394 blk_eject(s->qdev.conf.blk, !start);
1395 s->tray_open = !start;
1396 }
1397 }
1398 return 0;
1399 }
1400
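/* Hand the buffered response of an emulated command back to the guest. */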
1401 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1402 {
1403 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1404 int buflen = r->iov.iov_len;
1405
1406 if (buflen) {
1407 trace_scsi_disk_emulate_read_data(buflen);
1408 r->iov.iov_len = 0;
1409 r->started = true;
1410 scsi_req_data(&r->req, buflen);
1411 return;
1412 }
1413
1414 /* This also clears the sense buffer for REQUEST SENSE. */
1415 scsi_req_complete(&r->req, GOOD);
1416 }
1417
1418 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1419 uint8_t *inbuf, int inlen)
1420 {
1421 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1422 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1423 uint8_t *p;
1424 int len, expected_len, changeable_len, i;
1425
1426 /* The input buffer does not include the page header, so it is
1427 * off by 2 bytes.
1428 */
1429 expected_len = inlen + 2;
1430 if (expected_len > SCSI_MAX_MODE_LEN) {
1431 return -1;
1432 }
1433
1434 p = mode_current;
1435 memset(mode_current, 0, inlen + 2);
1436 len = mode_sense_page(s, page, &p, 0);
1437 if (len < 0 || len != expected_len) {
1438 return -1;
1439 }
1440
1441 p = mode_changeable;
1442 memset(mode_changeable, 0, inlen + 2);
1443 changeable_len = mode_sense_page(s, page, &p, 1);
1444 assert(changeable_len == len);
1445
1446 /* Check that unchangeable bits are the same as what MODE SENSE
1447 * would return.
1448 */
1449 for (i = 2; i < len; i++) {
1450 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1451 return -1;
1452 }
1453 }
1454 return 0;
1455 }
1456
1457 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1458 {
1459 switch (page) {
1460 case MODE_PAGE_CACHING:
1461 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1462 break;
1463
1464 default:
1465 break;
1466 }
1467 }
1468
1469 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1470 {
1471 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1472
1473 while (len > 0) {
1474 int page, subpage, page_len;
1475
1476 /* Parse both possible formats for the mode page headers. */
1477 page = p[0] & 0x3f;
1478 if (p[0] & 0x40) {
1479 if (len < 4) {
1480 goto invalid_param_len;
1481 }
1482 subpage = p[1];
1483 page_len = lduw_be_p(&p[2]);
1484 p += 4;
1485 len -= 4;
1486 } else {
1487 if (len < 2) {
1488 goto invalid_param_len;
1489 }
1490 subpage = 0;
1491 page_len = p[1];
1492 p += 2;
1493 len -= 2;
1494 }
1495
1496 if (subpage) {
1497 goto invalid_param;
1498 }
1499 if (page_len > len) {
1500 goto invalid_param_len;
1501 }
1502
1503 if (!change) {
1504 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1505 goto invalid_param;
1506 }
1507 } else {
1508 scsi_disk_apply_mode_select(s, page, p);
1509 }
1510
1511 p += page_len;
1512 len -= page_len;
1513 }
1514 return 0;
1515
1516 invalid_param:
1517 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1518 return -1;
1519
1520 invalid_param_len:
1521 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1522 return -1;
1523 }
1524
1525 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1526 {
1527 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1528 uint8_t *p = inbuf;
1529 int cmd = r->req.cmd.buf[0];
1530 int len = r->req.cmd.xfer;
1531 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1532 int bd_len;
1533 int pass;
1534
1535 /* We only support PF=1, SP=0. */
1536 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1537 goto invalid_field;
1538 }
1539
1540 if (len < hdr_len) {
1541 goto invalid_param_len;
1542 }
1543
1544 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1545 len -= hdr_len;
1546 p += hdr_len;
1547 if (len < bd_len) {
1548 goto invalid_param_len;
1549 }
1550 if (bd_len != 0 && bd_len != 8) {
1551 goto invalid_param;
1552 }
1553
1554 len -= bd_len;
1555 p += bd_len;
1556
1557 /* Ensure no change is made if there is an error! */
1558 for (pass = 0; pass < 2; pass++) {
1559 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1560 assert(pass == 0);
1561 return;
1562 }
1563 }
1564 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1565 /* The request is used as the AIO opaque value, so add a ref. */
1566 scsi_req_ref(&r->req);
1567 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1568 BLOCK_ACCT_FLUSH);
1569 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1570 return;
1571 }
1572
1573 scsi_req_complete(&r->req, GOOD);
1574 return;
1575
1576 invalid_param:
1577 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1578 return;
1579
1580 invalid_param_len:
1581 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1582 return;
1583
1584 invalid_field:
1585 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1586 }
1587
1588 static inline bool check_lba_range(SCSIDiskState *s,
1589 uint64_t sector_num, uint32_t nb_sectors)
1590 {
1591 /*
1592 * The first line tests that no overflow happens when computing the last
1593 * sector. The second line tests that the last accessed sector is in
1594 * range.
1595 *
1596 * Careful, the computations should not underflow for nb_sectors == 0,
1597 * and a 0-block read to the first LBA beyond the end of device is
1598 * valid.
1599 */
1600 return (sector_num <= sector_num + nb_sectors &&
1601 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1602 }
1603
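/* State carried across the chain of asynchronous UNMAP (discard) requests. */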
1604 typedef struct UnmapCBData {
1605 SCSIDiskReq *r;
1606 uint8_t *inbuf;
1607 int count;
1608 } UnmapCBData;
1609
1610 static void scsi_unmap_complete(void *opaque, int ret);
1611
1612 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1613 {
1614 SCSIDiskReq *r = data->r;
1615 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1616
1617 assert(r->req.aiocb == NULL);
1618
1619 if (data->count > 0) {
1620 r->sector = ldq_be_p(&data->inbuf[0])
1621 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1622 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL)
1623 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1624 if (!check_lba_range(s, r->sector, r->sector_count)) {
1625 block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
1626 BLOCK_ACCT_UNMAP);
1627 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1628 goto done;
1629 }
1630
1631 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1632 r->sector_count * BDRV_SECTOR_SIZE,
1633 BLOCK_ACCT_UNMAP);
1634
1635 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1636 r->sector * BDRV_SECTOR_SIZE,
1637 r->sector_count * BDRV_SECTOR_SIZE,
1638 scsi_unmap_complete, data);
1639 data->count--;
1640 data->inbuf += 16;
1641 return;
1642 }
1643
1644 scsi_req_complete(&r->req, GOOD);
1645
1646 done:
1647 scsi_req_unref(&r->req);
1648 g_free(data);
1649 }
1650
1651 static void scsi_unmap_complete(void *opaque, int ret)
1652 {
1653 UnmapCBData *data = opaque;
1654 SCSIDiskReq *r = data->r;
1655 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1656
1657 assert(r->req.aiocb != NULL);
1658 r->req.aiocb = NULL;
1659
1660 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1661 if (scsi_disk_req_check_error(r, ret, true)) {
1662 scsi_req_unref(&r->req);
1663 g_free(data);
1664 } else {
1665 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1666 scsi_unmap_complete_noio(data, ret);
1667 }
1668 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1669 }
1670
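/* Validate the UNMAP parameter list and start discarding the first descriptor. */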
1671 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1672 {
1673 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1674 uint8_t *p = inbuf;
1675 int len = r->req.cmd.xfer;
1676 UnmapCBData *data;
1677
1678 /* Reject ANCHOR=1. */
1679 if (r->req.cmd.buf[1] & 0x1) {
1680 goto invalid_field;
1681 }
1682
1683 if (len < 8) {
1684 goto invalid_param_len;
1685 }
1686 if (len < lduw_be_p(&p[0]) + 2) {
1687 goto invalid_param_len;
1688 }
1689 if (len < lduw_be_p(&p[2]) + 8) {
1690 goto invalid_param_len;
1691 }
1692 if (lduw_be_p(&p[2]) & 15) {
1693 goto invalid_param_len;
1694 }
1695
1696 if (!blk_is_writable(s->qdev.conf.blk)) {
1697 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1698 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1699 return;
1700 }
1701
1702 data = g_new0(UnmapCBData, 1);
1703 data->r = r;
1704 data->inbuf = &p[8];
1705 data->count = lduw_be_p(&p[2]) >> 4;
1706
1707 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1708 scsi_req_ref(&r->req);
1709 scsi_unmap_complete_noio(data, 0);
1710 return;
1711
1712 invalid_param_len:
1713 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1714 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1715 return;
1716
1717 invalid_field:
1718 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1719 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1720 }
1721
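/* State for a WRITE SAME that is emulated with a chain of bounce-buffer writes. */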
1722 typedef struct WriteSameCBData {
1723 SCSIDiskReq *r;
1724 int64_t sector;
1725 int nb_sectors;
1726 QEMUIOVector qiov;
1727 struct iovec iov;
1728 } WriteSameCBData;
1729
1730 static void scsi_write_same_complete(void *opaque, int ret)
1731 {
1732 WriteSameCBData *data = opaque;
1733 SCSIDiskReq *r = data->r;
1734 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1735
1736 assert(r->req.aiocb != NULL);
1737 r->req.aiocb = NULL;
1738 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1739 if (scsi_disk_req_check_error(r, ret, true)) {
1740 goto done;
1741 }
1742
1743 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1744
1745 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
1746 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
1747 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1748 data->iov.iov_len);
1749 if (data->iov.iov_len) {
1750 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1751 data->iov.iov_len, BLOCK_ACCT_WRITE);
1752         /* Reinitialize qiov to handle an unaligned WRITE SAME request
1753          * where the final qiov may need a smaller size */
1754 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1755 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1756 data->sector << BDRV_SECTOR_BITS,
1757 &data->qiov, 0,
1758 scsi_write_same_complete, data);
1759 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1760 return;
1761 }
1762
1763 scsi_req_complete(&r->req, GOOD);
1764
1765 done:
1766 scsi_req_unref(&r->req);
1767 qemu_vfree(data->iov.iov_base);
1768 g_free(data);
1769 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1770 }
1771
1772 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1773 {
1774 SCSIRequest *req = &r->req;
1775 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1776 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1777 WriteSameCBData *data;
1778 uint8_t *buf;
1779 int i;
1780
1781 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1782 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1783 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1784 return;
1785 }
1786
1787 if (!blk_is_writable(s->qdev.conf.blk)) {
1788 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1789 return;
1790 }
1791 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1792 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1793 return;
1794 }
1795
1796 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1797 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1798
1799 /* The request is used as the AIO opaque value, so add a ref. */
1800 scsi_req_ref(&r->req);
1801 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1802 nb_sectors * s->qdev.blocksize,
1803 BLOCK_ACCT_WRITE);
1804 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1805 r->req.cmd.lba * s->qdev.blocksize,
1806 nb_sectors * s->qdev.blocksize,
1807 flags, scsi_aio_complete, r);
1808 return;
1809 }
1810
1811 data = g_new0(WriteSameCBData, 1);
1812 data->r = r;
1813 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1814 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1815 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1816 SCSI_WRITE_SAME_MAX);
1817 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1818 data->iov.iov_len);
1819 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1820
1821 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1822 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1823 }
1824
1825 scsi_req_ref(&r->req);
1826 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1827 data->iov.iov_len, BLOCK_ACCT_WRITE);
1828 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1829 data->sector << BDRV_SECTOR_BITS,
1830 &data->qiov, 0,
1831 scsi_write_same_complete, data);
1832 }
1833
1834 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1835 {
1836 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1837
1838 if (r->iov.iov_len) {
1839 int buflen = r->iov.iov_len;
1840 trace_scsi_disk_emulate_write_data(buflen);
1841 r->iov.iov_len = 0;
1842 scsi_req_data(&r->req, buflen);
1843 return;
1844 }
1845
1846 switch (req->cmd.buf[0]) {
1847 case MODE_SELECT:
1848 case MODE_SELECT_10:
1849 /* This also clears the sense buffer for REQUEST SENSE. */
1850 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1851 break;
1852
1853 case UNMAP:
1854 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1855 break;
1856
1857 case VERIFY_10:
1858 case VERIFY_12:
1859 case VERIFY_16:
1860 if (r->req.status == -1) {
1861 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1862 }
1863 break;
1864
1865 case WRITE_SAME_10:
1866 case WRITE_SAME_16:
1867 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1868 break;
1869
1870 default:
1871 abort();
1872 }
1873 }
1874
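/*
 * Dispatcher for commands whose responses are synthesized in a bounce
 * buffer (INQUIRY, MODE SENSE, READ CAPACITY, the MMC commands, ...)
 * rather than read from or written to the backing image.
 */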
1875 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1876 {
1877 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1878 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1879 uint64_t nb_sectors;
1880 uint8_t *outbuf;
1881 int buflen;
1882
1883 switch (req->cmd.buf[0]) {
1884 case INQUIRY:
1885 case MODE_SENSE:
1886 case MODE_SENSE_10:
1887 case RESERVE:
1888 case RESERVE_10:
1889 case RELEASE:
1890 case RELEASE_10:
1891 case START_STOP:
1892 case ALLOW_MEDIUM_REMOVAL:
1893 case GET_CONFIGURATION:
1894 case GET_EVENT_STATUS_NOTIFICATION:
1895 case MECHANISM_STATUS:
1896 case REQUEST_SENSE:
1897 break;
1898
1899 default:
1900 if (!blk_is_available(s->qdev.conf.blk)) {
1901 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1902 return 0;
1903 }
1904 break;
1905 }
1906
1907 /*
1908 * FIXME: we shouldn't return anything bigger than 4k, but the code
1909 * requires the buffer to be as big as req->cmd.xfer in several
1910 * places. So, do not allow CDBs with a very large ALLOCATION
1911 * LENGTH. The real fix would be to modify scsi_read_data and
1912 * dma_buf_read, so that they return data beyond the buflen
1913 * as all zeros.
1914 */
1915 if (req->cmd.xfer > 65536) {
1916 goto illegal_request;
1917 }
1918 r->buflen = MAX(4096, req->cmd.xfer);
1919
1920 if (!r->iov.iov_base) {
1921 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1922 }
1923
1924 outbuf = r->iov.iov_base;
1925 memset(outbuf, 0, r->buflen);
1926 switch (req->cmd.buf[0]) {
1927 case TEST_UNIT_READY:
1928 assert(blk_is_available(s->qdev.conf.blk));
1929 break;
1930 case INQUIRY:
1931 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1932 if (buflen < 0) {
1933 goto illegal_request;
1934 }
1935 break;
1936 case MODE_SENSE:
1937 case MODE_SENSE_10:
1938 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1939 if (buflen < 0) {
1940 goto illegal_request;
1941 }
1942 break;
1943 case READ_TOC:
1944 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1945 if (buflen < 0) {
1946 goto illegal_request;
1947 }
1948 break;
1949 case RESERVE:
1950 if (req->cmd.buf[1] & 1) {
1951 goto illegal_request;
1952 }
1953 break;
1954 case RESERVE_10:
1955 if (req->cmd.buf[1] & 3) {
1956 goto illegal_request;
1957 }
1958 break;
1959 case RELEASE:
1960 if (req->cmd.buf[1] & 1) {
1961 goto illegal_request;
1962 }
1963 break;
1964 case RELEASE_10:
1965 if (req->cmd.buf[1] & 3) {
1966 goto illegal_request;
1967 }
1968 break;
1969 case START_STOP:
1970 if (scsi_disk_emulate_start_stop(r) < 0) {
1971 return 0;
1972 }
1973 break;
1974 case ALLOW_MEDIUM_REMOVAL:
1975 s->tray_locked = req->cmd.buf[4] & 1;
1976 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1977 break;
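    /*
     * READ CAPACITY (10) returns 8 bytes of parameter data: the LBA of the
     * last block (big-endian, bytes 0-3) followed by the block length in
     * bytes (bytes 4-7). A returned LBA of 0xffffffff signals that the
     * capacity does not fit and the guest should fall back to
     * READ CAPACITY (16).
     */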
1978 case READ_CAPACITY_10:
1979 /* The normal LEN field for this command is zero. */
1980 memset(outbuf, 0, 8);
1981 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1982 if (!nb_sectors) {
1983 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1984 return 0;
1985 }
1986 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1987 goto illegal_request;
1988 }
1989 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1990 /* Returned value is the address of the last sector. */
1991 nb_sectors--;
1992 /* Remember the new size for read/write sanity checking. */
1993 s->qdev.max_lba = nb_sectors;
1994 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1995 if (nb_sectors > UINT32_MAX) {
1996 nb_sectors = UINT32_MAX;
1997 }
1998 outbuf[0] = (nb_sectors >> 24) & 0xff;
1999 outbuf[1] = (nb_sectors >> 16) & 0xff;
2000 outbuf[2] = (nb_sectors >> 8) & 0xff;
2001 outbuf[3] = nb_sectors & 0xff;
2002 outbuf[4] = 0;
2003 outbuf[5] = 0;
2004 outbuf[6] = s->qdev.blocksize >> 8;
2005 outbuf[7] = 0;
2006 break;
2007 case REQUEST_SENSE:
2008 /* Just return "NO SENSE". */
2009 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
2010 (req->cmd.buf[1] & 1) == 0);
2011 if (buflen < 0) {
2012 goto illegal_request;
2013 }
2014 break;
2015 case MECHANISM_STATUS:
2016 buflen = scsi_emulate_mechanism_status(s, outbuf);
2017 if (buflen < 0) {
2018 goto illegal_request;
2019 }
2020 break;
2021 case GET_CONFIGURATION:
2022 buflen = scsi_get_configuration(s, outbuf);
2023 if (buflen < 0) {
2024 goto illegal_request;
2025 }
2026 break;
2027 case GET_EVENT_STATUS_NOTIFICATION:
2028 buflen = scsi_get_event_status_notification(s, r, outbuf);
2029 if (buflen < 0) {
2030 goto illegal_request;
2031 }
2032 break;
2033 case READ_DISC_INFORMATION:
2034 buflen = scsi_read_disc_information(s, r, outbuf);
2035 if (buflen < 0) {
2036 goto illegal_request;
2037 }
2038 break;
2039 case READ_DVD_STRUCTURE:
2040 buflen = scsi_read_dvd_structure(s, r, outbuf);
2041 if (buflen < 0) {
2042 goto illegal_request;
2043 }
2044 break;
2045 case SERVICE_ACTION_IN_16:
2046 /* Service Action In subcommands. */
2047 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2048 trace_scsi_disk_emulate_command_SAI_16();
2049 memset(outbuf, 0, req->cmd.xfer);
2050 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2051 if (!nb_sectors) {
2052 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2053 return 0;
2054 }
2055 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2056 goto illegal_request;
2057 }
2058 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2059 /* Returned value is the address of the last sector. */
2060 nb_sectors--;
2061 /* Remember the new size for read/write sanity checking. */
2062 s->qdev.max_lba = nb_sectors;
2063 outbuf[0] = (nb_sectors >> 56) & 0xff;
2064 outbuf[1] = (nb_sectors >> 48) & 0xff;
2065 outbuf[2] = (nb_sectors >> 40) & 0xff;
2066 outbuf[3] = (nb_sectors >> 32) & 0xff;
2067 outbuf[4] = (nb_sectors >> 24) & 0xff;
2068 outbuf[5] = (nb_sectors >> 16) & 0xff;
2069 outbuf[6] = (nb_sectors >> 8) & 0xff;
2070 outbuf[7] = nb_sectors & 0xff;
2071 outbuf[8] = 0;
2072 outbuf[9] = 0;
2073 outbuf[10] = s->qdev.blocksize >> 8;
2074 outbuf[11] = 0;
2075 outbuf[12] = 0;
2076 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2077
2078 /* set TPE bit if the format supports discard */
2079 if (s->qdev.conf.discard_granularity) {
2080 outbuf[14] = 0x80;
2081 }
2082
2083 /* Protection, exponent and lowest LBA fields are left blank. */
2084 break;
2085 }
2086 trace_scsi_disk_emulate_command_SAI_unsupported();
2087 goto illegal_request;
2088 case SYNCHRONIZE_CACHE:
2089 /* The request is used as the AIO opaque value, so add a ref. */
2090 scsi_req_ref(&r->req);
2091 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2092 BLOCK_ACCT_FLUSH);
2093 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2094 return 0;
2095 case SEEK_10:
2096 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2097 if (r->req.cmd.lba > s->qdev.max_lba) {
2098 goto illegal_lba;
2099 }
2100 break;
2101 case MODE_SELECT:
2102 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2103 break;
2104 case MODE_SELECT_10:
2105 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2106 break;
2107 case UNMAP:
2108 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2109 break;
2110 case VERIFY_10:
2111 case VERIFY_12:
2112 case VERIFY_16:
2113 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2114 if (req->cmd.buf[1] & 6) {
2115 goto illegal_request;
2116 }
2117 break;
2118 case WRITE_SAME_10:
2119 case WRITE_SAME_16:
2120 trace_scsi_disk_emulate_command_WRITE_SAME(
2121 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2122 break;
2123 default:
2124 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2125 scsi_command_name(buf[0]));
2126 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2127 return 0;
2128 }
2129 assert(!r->req.aiocb);
2130 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2131 if (r->iov.iov_len == 0) {
2132 scsi_req_complete(&r->req, GOOD);
2133 }
2134 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2135 assert(r->iov.iov_len == req->cmd.xfer);
2136 return -r->iov.iov_len;
2137 } else {
2138 return r->iov.iov_len;
2139 }
2140
2141 illegal_request:
2142 if (r->req.status == -1) {
2143 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2144 }
2145 return 0;
2146
2147 illegal_lba:
2148 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2149 return 0;
2150 }
2151
2152 /* Execute a SCSI command. Returns the length of the data expected by the
2153 command. This will be positive for data transfers from the device
2154 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2155 and zero if the command does not transfer any data. */
2156
2157 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2158 {
2159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2160 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2161 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2162 uint32_t len;
2163 uint8_t command;
2164
2165 command = buf[0];
2166
2167 if (!blk_is_available(s->qdev.conf.blk)) {
2168 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2169 return 0;
2170 }
2171
2172 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2173 switch (command) {
2174 case READ_6:
2175 case READ_10:
2176 case READ_12:
2177 case READ_16:
2178 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2179 /* Protection information is not supported. For SCSI versions 2 and
2180 * older (as determined by snooping the guest's INQUIRY commands),
2181 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2182 */
2183 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2184 goto illegal_request;
2185 }
2186 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2187 goto illegal_lba;
2188 }
2189 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2190 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2191 break;
2192 case WRITE_6:
2193 case WRITE_10:
2194 case WRITE_12:
2195 case WRITE_16:
2196 case WRITE_VERIFY_10:
2197 case WRITE_VERIFY_12:
2198 case WRITE_VERIFY_16:
2199 if (!blk_is_writable(s->qdev.conf.blk)) {
2200 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2201 return 0;
2202 }
2203 trace_scsi_disk_dma_command_WRITE(
2204 (command & 0xe) == 0xe ? "And Verify " : "",
2205 r->req.cmd.lba, len);
2206 /* fall through */
2207 case VERIFY_10:
2208 case VERIFY_12:
2209 case VERIFY_16:
2210 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2211 * As far as DMA is concerned, we can treat it the same as a write;
2212 * scsi_block_do_sgio will send VERIFY commands.
2213 */
2214 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2215 goto illegal_request;
2216 }
2217 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2218 goto illegal_lba;
2219 }
2220 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2221 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2222 break;
2223 default:
2224 abort();
2225 illegal_request:
2226 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2227 return 0;
2228 illegal_lba:
2229 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2230 return 0;
2231 }
2232 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2233 if (r->sector_count == 0) {
2234 scsi_req_complete(&r->req, GOOD);
2235 }
2236 assert(r->iov.iov_len == 0);
2237 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2238 return -r->sector_count * BDRV_SECTOR_SIZE;
2239 } else {
2240 return r->sector_count * BDRV_SECTOR_SIZE;
2241 }
2242 }
2243
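/*
 * Device reset: cancel all in-flight requests with RESET sense, recompute
 * max_lba from the current backend size, unlock and close the tray, and
 * restore the configured default SCSI version.
 */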
2244 static void scsi_disk_reset(DeviceState *dev)
2245 {
2246 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2247 uint64_t nb_sectors;
2248
2249 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2250
2251 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2252 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2253 if (nb_sectors) {
2254 nb_sectors--;
2255 }
2256 s->qdev.max_lba = nb_sectors;
2257 /* reset tray statuses */
2258 s->tray_locked = 0;
2259 s->tray_open = 0;
2260
2261 s->qdev.scsi_version = s->qdev.default_scsi_version;
2262 }
2263
2264 static void scsi_disk_resize_cb(void *opaque)
2265 {
2266 SCSIDiskState *s = opaque;
2267
2268 /* SPC lists this sense code as available only for
2269 * direct-access devices.
2270 */
2271 if (s->qdev.type == TYPE_DISK) {
2272 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2273 }
2274 }
2275
2276 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2277 {
2278 SCSIDiskState *s = opaque;
2279
2280 /*
2281 * When a CD gets changed, we have to report an ejected state and
2282 * then a loaded state to guests so that they detect tray
2283 * open/close and media change events. Guests that do not use
2284 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2285 * states rely on this behavior.
2286 *
2287 * media_changed governs the state machine used for unit attention
2288 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2289 */
2290 s->media_changed = load;
2291 s->tray_open = !load;
2292 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2293 s->media_event = true;
2294 s->eject_request = false;
2295 }
2296
2297 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2298 {
2299 SCSIDiskState *s = opaque;
2300
2301 s->eject_request = true;
2302 if (force) {
2303 s->tray_locked = false;
2304 }
2305 }
2306
2307 static bool scsi_cd_is_tray_open(void *opaque)
2308 {
2309 return ((SCSIDiskState *)opaque)->tray_open;
2310 }
2311
2312 static bool scsi_cd_is_medium_locked(void *opaque)
2313 {
2314 return ((SCSIDiskState *)opaque)->tray_locked;
2315 }
2316
2317 static const BlockDevOps scsi_disk_removable_block_ops = {
2318 .change_media_cb = scsi_cd_change_media_cb,
2319 .eject_request_cb = scsi_cd_eject_request_cb,
2320 .is_tray_open = scsi_cd_is_tray_open,
2321 .is_medium_locked = scsi_cd_is_medium_locked,
2322
2323 .resize_cb = scsi_disk_resize_cb,
2324 };
2325
2326 static const BlockDevOps scsi_disk_block_ops = {
2327 .resize_cb = scsi_disk_resize_cb,
2328 };
2329
2330 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2331 {
2332 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2333 if (s->media_changed) {
2334 s->media_changed = false;
2335 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2336 }
2337 }
2338
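/*
 * Common realize code shared by scsi-hd, scsi-cd, scsi-disk and scsi-block:
 * validate the block backend and geometry, apply read-only and discard
 * defaults, fill in default version/vendor/device_id strings and register
 * the BlockDevOps used for media change and resize notifications.
 */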
2339 static void scsi_realize(SCSIDevice *dev, Error **errp)
2340 {
2341 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2342 bool read_only;
2343
2344 if (!s->qdev.conf.blk) {
2345 error_setg(errp, "drive property not set");
2346 return;
2347 }
2348
2349 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2350 !blk_is_inserted(s->qdev.conf.blk)) {
2351 error_setg(errp, "Device needs media, but drive is empty");
2352 return;
2353 }
2354
2355 if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2356 return;
2357 }
2358
2359 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2360 !s->qdev.hba_supports_iothread)
2361 {
2362 error_setg(errp, "HBA does not support iothreads");
2363 return;
2364 }
2365
2366 if (dev->type == TYPE_DISK) {
2367 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2368 return;
2369 }
2370 }
2371
2372 read_only = !blk_supports_write_perm(s->qdev.conf.blk);
2373 if (dev->type == TYPE_ROM) {
2374 read_only = true;
2375 }
2376
2377 if (!blkconf_apply_backend_options(&dev->conf, read_only,
2378 dev->type == TYPE_DISK, errp)) {
2379 return;
2380 }
2381
2382 if (s->qdev.conf.discard_granularity == -1) {
2383 s->qdev.conf.discard_granularity =
2384 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2385 }
2386
2387 if (!s->version) {
2388 s->version = g_strdup(qemu_hw_version());
2389 }
2390 if (!s->vendor) {
2391 s->vendor = g_strdup("QEMU");
2392 }
2393 if (!s->device_id) {
2394 if (s->serial) {
2395 s->device_id = g_strdup_printf("%.20s", s->serial);
2396 } else {
2397 const char *str = blk_name(s->qdev.conf.blk);
2398 if (str && *str) {
2399 s->device_id = g_strdup(str);
2400 }
2401 }
2402 }
2403
2404 if (blk_is_sg(s->qdev.conf.blk)) {
2405 error_setg(errp, "unwanted /dev/sg*");
2406 return;
2407 }
2408
2409 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2410 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2411 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2412 } else {
2413 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2414 }
2415 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2416
2417 blk_iostatus_enable(s->qdev.conf.blk);
2418
2419 add_boot_device_lchs(&dev->qdev, NULL,
2420 dev->conf.lcyls,
2421 dev->conf.lheads,
2422 dev->conf.lsecs);
2423 }
2424
2425 static void scsi_unrealize(SCSIDevice *dev)
2426 {
2427 del_boot_device_lchs(&dev->qdev, NULL);
2428 }
2429
2430 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2431 {
2432 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2433 AioContext *ctx = NULL;
2434 /* This can happen for devices without a drive. The error message for the
2435 * missing backend will be issued in scsi_realize.
2436 */
2437 if (s->qdev.conf.blk) {
2438 ctx = blk_get_aio_context(s->qdev.conf.blk);
2439 aio_context_acquire(ctx);
2440 if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2441 goto out;
2442 }
2443 }
2444 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2445 s->qdev.type = TYPE_DISK;
2446 if (!s->product) {
2447 s->product = g_strdup("QEMU HARDDISK");
2448 }
2449 scsi_realize(&s->qdev, errp);
2450 out:
2451 if (ctx) {
2452 aio_context_release(ctx);
2453 }
2454 }
2455
2456 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2457 {
2458 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2459 AioContext *ctx;
2460 int ret;
2461
2462 if (!dev->conf.blk) {
2463 /* Anonymous BlockBackend for an empty drive. As we put it into
2464 * dev->conf, qdev takes care of detaching on unplug. */
2465 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
2466 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2467 assert(ret == 0);
2468 }
2469
2470 ctx = blk_get_aio_context(dev->conf.blk);
2471 aio_context_acquire(ctx);
2472 s->qdev.blocksize = 2048;
2473 s->qdev.type = TYPE_ROM;
2474 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2475 if (!s->product) {
2476 s->product = g_strdup("QEMU CD-ROM");
2477 }
2478 scsi_realize(&s->qdev, errp);
2479 aio_context_release(ctx);
2480 }
2481
2482 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2483 {
2484 DriveInfo *dinfo;
2485 Error *local_err = NULL;
2486
2487 warn_report("'scsi-disk' is deprecated, "
2488 "please use 'scsi-hd' or 'scsi-cd' instead");
2489
2490 if (!dev->conf.blk) {
2491 scsi_realize(dev, &local_err);
2492 assert(local_err);
2493 error_propagate(errp, local_err);
2494 return;
2495 }
2496
2497 dinfo = blk_legacy_dinfo(dev->conf.blk);
2498 if (dinfo && dinfo->media_cd) {
2499 scsi_cd_realize(dev, errp);
2500 } else {
2501 scsi_hd_realize(dev, errp);
2502 }
2503 }
2504
2505 static const SCSIReqOps scsi_disk_emulate_reqops = {
2506 .size = sizeof(SCSIDiskReq),
2507 .free_req = scsi_free_request,
2508 .send_command = scsi_disk_emulate_command,
2509 .read_data = scsi_disk_emulate_read_data,
2510 .write_data = scsi_disk_emulate_write_data,
2511 .get_buf = scsi_get_buf,
2512 };
2513
2514 static const SCSIReqOps scsi_disk_dma_reqops = {
2515 .size = sizeof(SCSIDiskReq),
2516 .free_req = scsi_free_request,
2517 .send_command = scsi_disk_dma_command,
2518 .read_data = scsi_read_data,
2519 .write_data = scsi_write_data,
2520 .get_buf = scsi_get_buf,
2521 .load_request = scsi_disk_load_request,
2522 .save_request = scsi_disk_save_request,
2523 };
2524
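/*
 * Dispatch table indexed by opcode: commands that are fully emulated use
 * scsi_disk_emulate_reqops, while READ/WRITE/WRITE AND VERIFY go through
 * scsi_disk_dma_reqops and the DMA helpers. Opcodes missing from the table
 * fall back to the emulation reqops in scsi_new_request(), which then
 * reports INVALID OPCODE.
 */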
2525 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2526 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2527 [INQUIRY] = &scsi_disk_emulate_reqops,
2528 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2529 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2530 [START_STOP] = &scsi_disk_emulate_reqops,
2531 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2532 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2533 [READ_TOC] = &scsi_disk_emulate_reqops,
2534 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2535 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2536 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2537 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2538 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2539 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2540 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2541 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2542 [SEEK_10] = &scsi_disk_emulate_reqops,
2543 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2544 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2545 [UNMAP] = &scsi_disk_emulate_reqops,
2546 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2547 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2548 [VERIFY_10] = &scsi_disk_emulate_reqops,
2549 [VERIFY_12] = &scsi_disk_emulate_reqops,
2550 [VERIFY_16] = &scsi_disk_emulate_reqops,
2551
2552 [READ_6] = &scsi_disk_dma_reqops,
2553 [READ_10] = &scsi_disk_dma_reqops,
2554 [READ_12] = &scsi_disk_dma_reqops,
2555 [READ_16] = &scsi_disk_dma_reqops,
2556 [WRITE_6] = &scsi_disk_dma_reqops,
2557 [WRITE_10] = &scsi_disk_dma_reqops,
2558 [WRITE_12] = &scsi_disk_dma_reqops,
2559 [WRITE_16] = &scsi_disk_dma_reqops,
2560 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2561 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2562 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2563 };
2564
2565 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2566 {
2567 int i;
2568 int len = scsi_cdb_length(buf);
2569 char *line_buffer, *p;
2570
2571 line_buffer = g_malloc(len * 5 + 1);
2572
2573 for (i = 0, p = line_buffer; i < len; i++) {
2574 p += sprintf(p, " 0x%02x", buf[i]);
2575 }
2576 trace_scsi_disk_new_request(lun, tag, line_buffer);
2577
2578 g_free(line_buffer);
2579 }
2580
2581 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2582 uint8_t *buf, void *hba_private)
2583 {
2584 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2585 SCSIRequest *req;
2586 const SCSIReqOps *ops;
2587 uint8_t command;
2588
2589 command = buf[0];
2590 ops = scsi_disk_reqops_dispatch[command];
2591 if (!ops) {
2592 ops = &scsi_disk_emulate_reqops;
2593 }
2594 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2595
2596 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2597 scsi_disk_new_request_dump(lun, tag, buf);
2598 }
2599
2600 return req;
2601 }
2602
2603 #ifdef __linux__
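/*
 * Issue a standard 36-byte INQUIRY through SG_IO to find out what kind of
 * device sits behind the host block node; only the peripheral device type
 * and the RMB (removable medium) bit are used.
 */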
2604 static int get_device_type(SCSIDiskState *s)
2605 {
2606 uint8_t cmd[16];
2607 uint8_t buf[36];
2608 int ret;
2609
2610 memset(cmd, 0, sizeof(cmd));
2611 memset(buf, 0, sizeof(buf));
2612 cmd[0] = INQUIRY;
2613 cmd[4] = sizeof(buf);
2614
2615 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2616 buf, sizeof(buf), s->qdev.io_timeout);
2617 if (ret < 0) {
2618 return -1;
2619 }
2620 s->qdev.type = buf[0];
2621 if (buf[1] & 0x80) {
2622 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2623 }
2624 return 0;
2625 }
2626
2627 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2628 {
2629 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2630 AioContext *ctx;
2631 int sg_version;
2632 int rc;
2633
2634 if (!s->qdev.conf.blk) {
2635 error_setg(errp, "drive property not set");
2636 return;
2637 }
2638
2639 if (s->rotation_rate) {
2640 error_report_once("rotation_rate is specified for scsi-block but is "
2641 "not implemented. This option is deprecated and will "
2642 "be removed in a future version");
2643 }
2644
2645 ctx = blk_get_aio_context(s->qdev.conf.blk);
2646 aio_context_acquire(ctx);
2647
2648 /* Check that we are using a driver that supports SG_IO (version 3 and later). */
2649 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2650 if (rc < 0) {
2651 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2652 if (rc != -EPERM) {
2653 error_append_hint(errp, "Is this a SCSI device?\n");
2654 }
2655 goto out;
2656 }
2657 if (sg_version < 30000) {
2658 error_setg(errp, "scsi generic interface too old");
2659 goto out;
2660 }
2661
2662 /* get device type from INQUIRY data */
2663 rc = get_device_type(s);
2664 if (rc < 0) {
2665 error_setg(errp, "INQUIRY failed");
2666 goto out;
2667 }
2668
2669 /* Make a guess at the block size; we'll fix it when the guest sends
2670 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2671 * anyway. (TODO: check in /sys.)
2672 */
2673 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2674 s->qdev.blocksize = 2048;
2675 } else {
2676 s->qdev.blocksize = 512;
2677 }
2678
2679 /* Make the scsi-block device not removable via the HMP and QMP eject
2680 * commands.
2681 */
2682 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2683
2684 scsi_realize(&s->qdev, errp);
2685 scsi_generic_read_device_inquiry(&s->qdev);
2686
2687 out:
2688 aio_context_release(ctx);
2689 }
2690
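/*
 * Per-request state for the scsi-block DMA path. The opcode, CDB byte 1
 * (DPO/FUA/protection flags) and group number of the guest's CDB are saved
 * so that scsi_block_do_sgio() can rebuild an equivalent CDB for every
 * segment submitted by the DMA helpers.
 */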
2691 typedef struct SCSIBlockReq {
2692 SCSIDiskReq req;
2693 sg_io_hdr_t io_header;
2694
2695 /* Selected bytes of the original CDB, copied into our own CDB. */
2696 uint8_t cmd, cdb1, group_number;
2697
2698 /* CDB passed to SG_IO. */
2699 uint8_t cdb[16];
2700 } SCSIBlockReq;
2701
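/*
 * Submit one segment of a scsi-block request as an asynchronous SG_IO
 * ioctl. The DMA helpers may have split the guest's transfer, so the LBA
 * and block count are recomputed from the byte offset and the QEMUIOVector
 * and patched into a freshly built CDB.
 */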
2702 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2703 int64_t offset, QEMUIOVector *iov,
2704 int direction,
2705 BlockCompletionFunc *cb, void *opaque)
2706 {
2707 sg_io_hdr_t *io_header = &req->io_header;
2708 SCSIDiskReq *r = &req->req;
2709 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2710 int nb_logical_blocks;
2711 uint64_t lba;
2712 BlockAIOCB *aiocb;
2713
2714 /* This is not supported yet. It can only happen if the guest does
2715 * reads and writes that are not aligned to the logical sector size
2716 * _and_ cover multiple MemoryRegions.
2717 */
2718 assert(offset % s->qdev.blocksize == 0);
2719 assert(iov->size % s->qdev.blocksize == 0);
2720
2721 io_header->interface_id = 'S';
2722
2723 /* The data transfer comes from the QEMUIOVector. */
2724 io_header->dxfer_direction = direction;
2725 io_header->dxfer_len = iov->size;
2726 io_header->dxferp = (void *)iov->iov;
2727 io_header->iovec_count = iov->niov;
2728 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2729
2730 /* Build a new CDB with the LBA and length patched in, in case the
2731 * DMA helpers split the transfer into multiple segments. Do not
2732 * build a CDB smaller than what the guest wanted, and only build
2733 * a larger one if strictly necessary.
2734 */
2735 io_header->cmdp = req->cdb;
2736 lba = offset / s->qdev.blocksize;
2737 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2738
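    /*
     * The group code in bits 7..5 of the opcode determines the original
     * CDB size: group 0 is a 6-byte CDB, groups 1 and 2 are 10-byte,
     * group 5 is 12-byte and group 4 is 16-byte.
     */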
2739 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2740 /* 6-byte CDB */
2741 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2742 req->cdb[4] = nb_logical_blocks;
2743 req->cdb[5] = 0;
2744 io_header->cmd_len = 6;
2745 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2746 /* 10-byte CDB */
2747 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2748 req->cdb[1] = req->cdb1;
2749 stl_be_p(&req->cdb[2], lba);
2750 req->cdb[6] = req->group_number;
2751 stw_be_p(&req->cdb[7], nb_logical_blocks);
2752 req->cdb[9] = 0;
2753 io_header->cmd_len = 10;
2754 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2755 /* 12-byte CDB */
2756 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2757 req->cdb[1] = req->cdb1;
2758 stl_be_p(&req->cdb[2], lba);
2759 stl_be_p(&req->cdb[6], nb_logical_blocks);
2760 req->cdb[10] = req->group_number;
2761 req->cdb[11] = 0;
2762 io_header->cmd_len = 12;
2763 } else {
2764 /* 16-byte CDB */
2765 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2766 req->cdb[1] = req->cdb1;
2767 stq_be_p(&req->cdb[2], lba);
2768 stl_be_p(&req->cdb[10], nb_logical_blocks);
2769 req->cdb[14] = req->group_number;
2770 req->cdb[15] = 0;
2771 io_header->cmd_len = 16;
2772 }
2773
2774 /* The rest is as in scsi-generic.c. */
2775 io_header->mx_sb_len = sizeof(r->req.sense);
2776 io_header->sbp = r->req.sense;
2777 io_header->timeout = s->qdev.io_timeout * 1000;
2778 io_header->usr_ptr = r;
2779 io_header->flags |= SG_FLAG_DIRECT_IO;
2780 trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
2781 nb_logical_blocks, io_header->timeout);
2782 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2783 assert(aiocb != NULL);
2784 return aiocb;
2785 }
2786
2787 static bool scsi_block_no_fua(SCSICommand *cmd)
2788 {
2789 return false;
2790 }
2791
2792 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2793 QEMUIOVector *iov,
2794 BlockCompletionFunc *cb, void *cb_opaque,
2795 void *opaque)
2796 {
2797 SCSIBlockReq *r = opaque;
2798 return scsi_block_do_sgio(r, offset, iov,
2799 SG_DXFER_FROM_DEV, cb, cb_opaque);
2800 }
2801
2802 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2803 QEMUIOVector *iov,
2804 BlockCompletionFunc *cb, void *cb_opaque,
2805 void *opaque)
2806 {
2807 SCSIBlockReq *r = opaque;
2808 return scsi_block_do_sgio(r, offset, iov,
2809 SG_DXFER_TO_DEV, cb, cb_opaque);
2810 }
2811
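/*
 * Decide whether a CDB is passed through verbatim with the scsi-generic
 * request ops (return true) or handled by the DMA helpers with a rebuilt
 * CDB (return false). READ, WRITE and WRITE AND VERIFY take the DMA path
 * on non-CD devices, as does VERIFY with BYTCHK set to 01b; everything
 * else is passed through.
 */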
2812 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2813 {
2814 switch (buf[0]) {
2815 case VERIFY_10:
2816 case VERIFY_12:
2817 case VERIFY_16:
2818 /* Check if BYTCHK == 0x01 (the data-out buffer contains data
2819 * for the number of logical blocks specified in the length
2820 * field). For other modes, do not use a scatter/gather operation.
2821 */
2822 if ((buf[1] & 6) == 2) {
2823 return false;
2824 }
2825 break;
2826
2827 case READ_6:
2828 case READ_10:
2829 case READ_12:
2830 case READ_16:
2831 case WRITE_6:
2832 case WRITE_10:
2833 case WRITE_12:
2834 case WRITE_16:
2835 case WRITE_VERIFY_10:
2836 case WRITE_VERIFY_12:
2837 case WRITE_VERIFY_16:
2838 /* MMC writing cannot be done via DMA helpers, because it sometimes
2839 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2840 * We might use scsi_block_dma_reqops as long as no writing commands are
2841 * seen, but performance usually isn't paramount on optical media. So,
2842 * just make scsi-block operate the same as scsi-generic for them.
2843 */
2844 if (s->qdev.type != TYPE_ROM) {
2845 return false;
2846 }
2847 break;
2848
2849 default:
2850 break;
2851 }
2852
2853 return true;
2854 }
2855
2856
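/*
 * send_command hook for the scsi-block DMA path: stash the CDB bytes that
 * scsi_block_do_sgio() needs to rebuild the command, reject unsupported
 * protection information, and let scsi_disk_dma_command() do the usual
 * LBA and length checks.
 */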
2857 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2858 {
2859 SCSIBlockReq *r = (SCSIBlockReq *)req;
2860 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2861
2862 r->cmd = req->cmd.buf[0];
2863 switch (r->cmd >> 5) {
2864 case 0:
2865 /* 6-byte CDB. */
2866 r->cdb1 = r->group_number = 0;
2867 break;
2868 case 1:
2869 /* 10-byte CDB. */
2870 r->cdb1 = req->cmd.buf[1];
2871 r->group_number = req->cmd.buf[6];
2872 break;
2873 case 4:
2874 /* 12-byte CDB. */
2875 r->cdb1 = req->cmd.buf[1];
2876 r->group_number = req->cmd.buf[10];
2877 break;
2878 case 5:
2879 /* 16-byte CDB. */
2880 r->cdb1 = req->cmd.buf[1];
2881 r->group_number = req->cmd.buf[14];
2882 break;
2883 default:
2884 abort();
2885 }
2886
2887 /* Protection information is not supported. For SCSI versions 2 and
2888 * older (as determined by snooping the guest's INQUIRY commands),
2889 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2890 */
2891 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2892 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2893 return 0;
2894 }
2895
2896 r->req.status = &r->io_header.status;
2897 return scsi_disk_dma_command(req, buf);
2898 }
2899
2900 static const SCSIReqOps scsi_block_dma_reqops = {
2901 .size = sizeof(SCSIBlockReq),
2902 .free_req = scsi_free_request,
2903 .send_command = scsi_block_dma_command,
2904 .read_data = scsi_read_data,
2905 .write_data = scsi_write_data,
2906 .get_buf = scsi_get_buf,
2907 .load_request = scsi_disk_load_request,
2908 .save_request = scsi_disk_save_request,
2909 };
2910
2911 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2912 uint32_t lun, uint8_t *buf,
2913 void *hba_private)
2914 {
2915 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2916
2917 if (scsi_block_is_passthrough(s, buf)) {
2918 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2919 hba_private);
2920 } else {
2921 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2922 hba_private);
2923 }
2924 }
2925
2926 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2927 uint8_t *buf, void *hba_private)
2928 {
2929 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2930
2931 if (scsi_block_is_passthrough(s, buf)) {
2932 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2933 } else {
2934 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2935 }
2936 }
2937
2938 static void scsi_block_update_sense(SCSIRequest *req)
2939 {
2940 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2941 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
2942 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
2943 }
2944 #endif
2945
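/*
 * Default DMAIOFunc implementations for the emulated disk types: plain
 * vectored reads and writes on the block backend. scsi-block overrides
 * these with the SG_IO based scsi_block_dma_readv/writev above.
 */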
2946 static
2947 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2948 BlockCompletionFunc *cb, void *cb_opaque,
2949 void *opaque)
2950 {
2951 SCSIDiskReq *r = opaque;
2952 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2953 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2954 }
2955
2956 static
2957 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2958 BlockCompletionFunc *cb, void *cb_opaque,
2959 void *opaque)
2960 {
2961 SCSIDiskReq *r = opaque;
2962 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2963 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2964 }
2965
2966 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2967 {
2968 DeviceClass *dc = DEVICE_CLASS(klass);
2969 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2970
2971 dc->fw_name = "disk";
2972 dc->reset = scsi_disk_reset;
2973 sdc->dma_readv = scsi_dma_readv;
2974 sdc->dma_writev = scsi_dma_writev;
2975 sdc->need_fua_emulation = scsi_is_cmd_fua;
2976 }
2977
2978 static const TypeInfo scsi_disk_base_info = {
2979 .name = TYPE_SCSI_DISK_BASE,
2980 .parent = TYPE_SCSI_DEVICE,
2981 .class_init = scsi_disk_base_class_initfn,
2982 .instance_size = sizeof(SCSIDiskState),
2983 .class_size = sizeof(SCSIDiskClass),
2984 .abstract = true,
2985 };
2986
2987 #define DEFINE_SCSI_DISK_PROPERTIES() \
2988 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
2989 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \
2990 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2991 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2992 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2993 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2994 DEFINE_PROP_STRING("product", SCSIDiskState, product), \
2995 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
2996
2997
2998 static Property scsi_hd_properties[] = {
2999 DEFINE_SCSI_DISK_PROPERTIES(),
3000 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3001 SCSI_DISK_F_REMOVABLE, false),
3002 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3003 SCSI_DISK_F_DPOFUA, false),
3004 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3005 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3006 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3007 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3008 DEFAULT_MAX_UNMAP_SIZE),
3009 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3010 DEFAULT_MAX_IO_SIZE),
3011 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3012 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3013 5),
3014 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
3015 DEFINE_PROP_END_OF_LIST(),
3016 };
3017
3018 static const VMStateDescription vmstate_scsi_disk_state = {
3019 .name = "scsi-disk",
3020 .version_id = 1,
3021 .minimum_version_id = 1,
3022 .fields = (VMStateField[]) {
3023 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
3024 VMSTATE_BOOL(media_changed, SCSIDiskState),
3025 VMSTATE_BOOL(media_event, SCSIDiskState),
3026 VMSTATE_BOOL(eject_request, SCSIDiskState),
3027 VMSTATE_BOOL(tray_open, SCSIDiskState),
3028 VMSTATE_BOOL(tray_locked, SCSIDiskState),
3029 VMSTATE_END_OF_LIST()
3030 }
3031 };
3032
3033 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
3034 {
3035 DeviceClass *dc = DEVICE_CLASS(klass);
3036 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3037
3038 sc->realize = scsi_hd_realize;
3039 sc->unrealize = scsi_unrealize;
3040 sc->alloc_req = scsi_new_request;
3041 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3042 dc->desc = "virtual SCSI disk";
3043 device_class_set_props(dc, scsi_hd_properties);
3044 dc->vmsd = &vmstate_scsi_disk_state;
3045 }
3046
3047 static const TypeInfo scsi_hd_info = {
3048 .name = "scsi-hd",
3049 .parent = TYPE_SCSI_DISK_BASE,
3050 .class_init = scsi_hd_class_initfn,
3051 };
3052
3053 static Property scsi_cd_properties[] = {
3054 DEFINE_SCSI_DISK_PROPERTIES(),
3055 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3056 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3057 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3058 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3059 DEFAULT_MAX_IO_SIZE),
3060 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3061 5),
3062 DEFINE_PROP_END_OF_LIST(),
3063 };
3064
3065 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3066 {
3067 DeviceClass *dc = DEVICE_CLASS(klass);
3068 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3069
3070 sc->realize = scsi_cd_realize;
3071 sc->alloc_req = scsi_new_request;
3072 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3073 dc->desc = "virtual SCSI CD-ROM";
3074 device_class_set_props(dc, scsi_cd_properties);
3075 dc->vmsd = &vmstate_scsi_disk_state;
3076 }
3077
3078 static const TypeInfo scsi_cd_info = {
3079 .name = "scsi-cd",
3080 .parent = TYPE_SCSI_DISK_BASE,
3081 .class_init = scsi_cd_class_initfn,
3082 };
3083
3084 #ifdef __linux__
3085 static Property scsi_block_properties[] = {
3086 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
3087 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
3088 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
3089 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3090 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3091 DEFAULT_MAX_UNMAP_SIZE),
3092 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3093 DEFAULT_MAX_IO_SIZE),
3094 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3095 -1),
3096 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
3097 DEFAULT_IO_TIMEOUT),
3098 DEFINE_PROP_END_OF_LIST(),
3099 };
3100
3101 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3102 {
3103 DeviceClass *dc = DEVICE_CLASS(klass);
3104 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3105 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3106
3107 sc->realize = scsi_block_realize;
3108 sc->alloc_req = scsi_block_new_request;
3109 sc->parse_cdb = scsi_block_parse_cdb;
3110 sdc->dma_readv = scsi_block_dma_readv;
3111 sdc->dma_writev = scsi_block_dma_writev;
3112 sdc->update_sense = scsi_block_update_sense;
3113 sdc->need_fua_emulation = scsi_block_no_fua;
3114 dc->desc = "SCSI block device passthrough";
3115 device_class_set_props(dc, scsi_block_properties);
3116 dc->vmsd = &vmstate_scsi_disk_state;
3117 }
3118
3119 static const TypeInfo scsi_block_info = {
3120 .name = "scsi-block",
3121 .parent = TYPE_SCSI_DISK_BASE,
3122 .class_init = scsi_block_class_initfn,
3123 };
3124 #endif
3125
3126 static Property scsi_disk_properties[] = {
3127 DEFINE_SCSI_DISK_PROPERTIES(),
3128 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3129 SCSI_DISK_F_REMOVABLE, false),
3130 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3131 SCSI_DISK_F_DPOFUA, false),
3132 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3133 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3134 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3135 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3136 DEFAULT_MAX_UNMAP_SIZE),
3137 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3138 DEFAULT_MAX_IO_SIZE),
3139 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3140 5),
3141 DEFINE_PROP_END_OF_LIST(),
3142 };
3143
3144 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3145 {
3146 DeviceClass *dc = DEVICE_CLASS(klass);
3147 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3148
3149 sc->realize = scsi_disk_realize;
3150 sc->alloc_req = scsi_new_request;
3151 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3152 dc->fw_name = "disk";
3153 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3154 dc->reset = scsi_disk_reset;
3155 device_class_set_props(dc, scsi_disk_properties);
3156 dc->vmsd = &vmstate_scsi_disk_state;
3157 }
3158
3159 static const TypeInfo scsi_disk_info = {
3160 .name = "scsi-disk",
3161 .parent = TYPE_SCSI_DISK_BASE,
3162 .class_init = scsi_disk_class_initfn,
3163 };
3164
3165 static void scsi_disk_register_types(void)
3166 {
3167 type_register_static(&scsi_disk_base_info);
3168 type_register_static(&scsi_hd_info);
3169 type_register_static(&scsi_cd_info);
3170 #ifdef __linux__
3171 type_register_static(&scsi_block_info);
3172 #endif
3173 type_register_static(&scsi_disk_info);
3174 }
3175
3176 type_init(scsi_disk_register_types)