]> git.proxmox.com Git - mirror_qemu.git/blob - hw/scsi/scsi-disk.c
scsi-disk: add FORMAT UNIT command
[mirror_qemu.git] / hw / scsi / scsi-disk.c
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
43 #include "trace.h"
44 #include "qom/object.h"
45
46 #ifdef __linux
47 #include <scsi/sg.h>
48 #endif
49
50 #define SCSI_WRITE_SAME_MAX (512 * KiB)
51 #define SCSI_DMA_BUF_SIZE (128 * KiB)
52 #define SCSI_MAX_INQUIRY_LEN 256
53 #define SCSI_MAX_MODE_LEN 256
54
55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
56 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
57 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
58
59 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
60
61 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
62
/* Per-subclass hooks shared by the common scsi-disk request path. */
struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /* Vectored read/write entry points used by the read/write paths. */
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    /* Returns true if FUA must be emulated with an explicit flush. */
    bool (*need_fua_emulation)(SCSICommand *cmd);
    /* Optional hook to refresh sense data on CHECK CONDITION. */
    void (*update_sense)(SCSIRequest *r);
};
70
/* Per-request state layered on top of the generic SCSIRequest. */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;          /* next sector to transfer */
    uint32_t sector_count;    /* sectors still to transfer */
    uint32_t buflen;          /* size of the bounce buffer, 0 until allocated */
    bool started;             /* first data phase has begun */
    bool need_fua_emulation;  /* flush after write to emulate FUA */
    struct iovec iov;         /* bounce buffer for non-sg transfers */
    QEMUIOVector qiov;        /* wraps iov for block-layer calls */
    BlockAcctCookie acct;     /* accounting cookie for the in-flight op */
} SCSIDiskReq;
83
84 #define SCSI_DISK_F_REMOVABLE 0
85 #define SCSI_DISK_F_DPOFUA 1
86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
87
/* Device state for the scsi-disk family. */
struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;        /* bitmask of SCSI_DISK_F_* flags */
    bool media_changed;       /* pending unit-attention for media change */
    bool media_event;         /* pending GESN "new media" event */
    bool eject_request;       /* pending GESN "eject requested" event */
    uint16_t port_index;      /* relative target port for VPD page 0x83 */
    uint64_t max_unmap_size;  /* reported in VPD block limits page */
    uint64_t max_io_size;     /* reported in VPD block limits page */
    uint32_t quirks;
    QEMUBH *bh;
    /* Strings reported through INQUIRY / VPD pages. */
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xffe  - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};
115
/* Free the per-request bounce buffer allocated lazily by scsi_init_iovec(). */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}
122
123 /* Helper function for command completion with sense. */
/*
 * Helper function for command completion with sense: builds the sense
 * data into the request and completes it with CHECK CONDITION status.
 */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}
131
/*
 * Prepare the request's bounce buffer for the next chunk of data.
 * The buffer is allocated once with capacity 'size'; on every call the
 * iovec length is clamped to the smaller of the remaining transfer and
 * the buffer capacity.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
143
/*
 * Migration: serialize the in-flight request state. The field order
 * must match scsi_disk_load_request() exactly. For host-bound (write)
 * data the whole buffer is saved; for device-bound data that is not
 * being retried, the partial buffer is saved with an explicit length.
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}
161
/*
 * Migration: restore an in-flight request saved by scsi_disk_save_request().
 * Reads fields in the same order and re-creates the bounce buffer/iovec.
 */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        /* Re-allocate the bounce buffer with the saved capacity. */
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* A corrupt stream must not overflow the bounce buffer. */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
184
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 *
 * 'ret' is either a negative errno (block-layer failure) or a nonzero
 * SCSI status from a passthrough command.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Block-layer error: derive a SCSI status and sense from errno. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        /* Deliver the error to the guest, with sense data if available. */
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* VM is stopped; queue the request for retry on resume. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}
256
257 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
258 {
259 if (r->req.io_canceled) {
260 scsi_req_cancel_complete(&r->req);
261 return true;
262 }
263
264 if (ret < 0) {
265 return scsi_handle_rw_error(r, ret, acct_failed);
266 }
267
268 return false;
269 }
270
/*
 * Generic AIO completion callback: accounts the operation and completes
 * the request with GOOD status unless it was canceled or failed.
 * Runs with the BlockBackend's AioContext acquired for the duration.
 */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    /* Drop the reference taken when the AIO was issued. */
    scsi_req_unref(&r->req);
}
290
291 static bool scsi_is_cmd_fua(SCSICommand *cmd)
292 {
293 switch (cmd->buf[0]) {
294 case READ_10:
295 case READ_12:
296 case READ_16:
297 case WRITE_10:
298 case WRITE_12:
299 case WRITE_16:
300 return (cmd->buf[1] & 8) != 0;
301
302 case VERIFY_10:
303 case VERIFY_12:
304 case VERIFY_16:
305 case WRITE_VERIFY_10:
306 case WRITE_VERIFY_12:
307 case WRITE_VERIFY_16:
308 return true;
309
310 case READ_6:
311 case WRITE_6:
312 default:
313 return false;
314 }
315 }
316
/*
 * Finish a write request, emulating FUA with an explicit flush when the
 * command requires it; otherwise complete immediately with GOOD status.
 * Consumes one reference to the request in the non-flush path (the flush
 * path hands its reference to scsi_aio_complete()).
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}
334
/*
 * Completion of a scatter/gather DMA transfer (no further I/O pending).
 * The whole remaining range was transferred in one go, so the sector
 * bookkeeping is advanced to the end. Writes still need FUA handling.
 */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* scsi_write_do_fua() takes over the reference; do not unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
354
/*
 * AIO callback for scatter/gather DMA: record accounting, then run the
 * common completion logic under the BlockBackend's AioContext.
 */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
372
/*
 * One bounce-buffer chunk of a read has finished: advance the sector
 * bookkeeping and push the data to the HBA via scsi_req_data(), which
 * will call scsi_read_data() again if more data is expected.
 */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}
390
/*
 * AIO callback for a bounce-buffer read: record accounting, then run
 * the common chunk completion under the BlockBackend's AioContext.
 */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
409
/*
 * Actually issue a read to the block device. Uses scatter/gather DMA
 * directly when the HBA provided an sg list, otherwise reads one
 * bounce-buffer-sized chunk at a time.
 */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    /* Drop the caller's reference (the AIO path took its own above). */
    scsi_req_unref(&r->req);
}
443
/*
 * Completion of the pre-read flush used for FUA emulation: account the
 * flush, then issue the actual read.
 */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
461
/*
 * Read more data from scsi device into buffer.
 * SCSIReqOps data-phase entry point for device-to-host transfers; called
 * repeatedly by the HBA until sector_count reaches zero. On the first
 * call of a FUA read, flushes before reading so the data is current.
 */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* A write command must not reach the read data phase. */
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* Emulate FUA reads by flushing the write cache first. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
502
/*
 * One bounce-buffer chunk of a write has finished: advance the sector
 * bookkeeping, then either finish the command (with FUA handling) or
 * ask the HBA for the next chunk via scsi_req_data().
 */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        /* scsi_write_do_fua() takes over the reference; do not unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}
527
528 static void scsi_write_complete(void * opaque, int ret)
529 {
530 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
531 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
532
533 assert (r->req.aiocb != NULL);
534 r->req.aiocb = NULL;
535
536 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
537 if (ret < 0) {
538 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
539 } else {
540 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
541 }
542 scsi_write_complete_noio(r, ret);
543 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
544 }
545
/*
 * SCSIReqOps data-phase entry point for host-to-device transfers; called
 * by the HBA each time a chunk of write data is available. VERIFY
 * commands consume the data without writing it to the medium.
 */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        /* A read command must not reach the write data phase. */
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY: accept the data but do not write it to the medium. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}
599
/* Return a pointer to the data buffer (SCSIReqOps::get_buf). */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}
607
/*
 * Build an INQUIRY EVPD (Vital Product Data) page into outbuf.
 * Returns the total number of bytes written, or -1 if the requested
 * page is not supported for this device.
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* Common 4-byte VPD header; the page length byte is patched below. */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        /* Truncate overly long serial numbers to 36 characters. */
        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        /* Cap the id so the descriptor fits in the 255-byte page. */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        /* All limits below are converted from bytes to logical blocks. */
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            /* Honor the backend's transfer limit as well. */
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    /* Patch the PAGE LENGTH byte in the common header. */
    outbuf[start - 1] = buflen - start;
    return buflen;
}
762
/*
 * Build the response to an INQUIRY command: either a VPD page (EVPD=1)
 * or the standard INQUIRY data. Returns the number of bytes written,
 * or -1 for an unsupported request.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        /* PAGE CODE must be zero when EVPD is not set. */
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
               the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}
813
814 static inline bool media_is_dvd(SCSIDiskState *s)
815 {
816 uint64_t nb_sectors;
817 if (s->qdev.type != TYPE_ROM) {
818 return false;
819 }
820 if (!blk_is_available(s->qdev.conf.blk)) {
821 return false;
822 }
823 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
824 return nb_sectors > CD_MAX_SECTORS;
825 }
826
827 static inline bool media_is_cd(SCSIDiskState *s)
828 {
829 uint64_t nb_sectors;
830 if (s->qdev.type != TYPE_ROM) {
831 return false;
832 }
833 if (!blk_is_available(s->qdev.conf.blk)) {
834 return false;
835 }
836 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
837 return nb_sectors <= CD_MAX_SECTORS;
838 }
839
/*
 * Emulate READ DISC INFORMATION for CD-ROM devices: report a single
 * finalized session/track. Returns the response length (34) or -1 on
 * error (a CHECK CONDITION is raised for an invalid data type).
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}
872
/*
 * Emulate READ DVD STRUCTURE. Supports format 0x00 (physical format),
 * 0x01/0x04 (all-zero structures), 0x03 (no BCA -> error) and 0xff
 * (capability list). Returns the response length or -1 on error.
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response sizes per supported format, including the 4-byte header. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        /* Only DVD media (media type 0) is supported. */
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        /* All formats except the capability list need a DVD present. */
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        /* Sector counts are in 2048-byte DVD sectors (512-byte units / 4). */
        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            /* One 4-byte capability descriptor per supported format. */
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
966
967 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
968 {
969 uint8_t event_code, media_status;
970
971 media_status = 0;
972 if (s->tray_open) {
973 media_status = MS_TRAY_OPEN;
974 } else if (blk_is_inserted(s->qdev.conf.blk)) {
975 media_status = MS_MEDIA_PRESENT;
976 }
977
978 /* Event notification descriptor */
979 event_code = MEC_NO_CHANGE;
980 if (media_status != MS_TRAY_OPEN) {
981 if (s->media_event) {
982 event_code = MEC_NEW_MEDIA;
983 s->media_event = false;
984 } else if (s->eject_request) {
985 event_code = MEC_EJECT_REQUESTED;
986 s->eject_request = false;
987 }
988 }
989
990 outbuf[0] = event_code;
991 outbuf[1] = media_status;
992
993 /* These fields are reserved, just clear them. */
994 outbuf[2] = 0;
995 outbuf[3] = 0;
996 return 4;
997 }
998
/*
 * Emulate GET EVENT STATUS NOTIFICATION (CD-ROM only). Only polled
 * operation and the media event class are supported. Returns the
 * response length or -1 for unsupported requests.
 */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        /* No supported class requested: set NEA (No Event Available). */
        outbuf[2] = 0x80;
    }
    /* Event data length excludes the 2-byte length field and byte 2-3. */
    stw_be_p(outbuf, size - 4);
    return size;
}
1025
/*
 * Emulate GET CONFIGURATION (CD-ROM only): report the current profile
 * (DVD-ROM, CD-ROM or none) plus the profile-list, core and removable
 * media features. Returns the fixed 40-byte response length.
 */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}
1067
1068 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1069 {
1070 if (s->qdev.type != TYPE_ROM) {
1071 return -1;
1072 }
1073 memset(outbuf, 0, 8);
1074 outbuf[5] = 1; /* CD-ROM */
1075 return 8;
1076 }
1077
1078 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1079 int page_control)
1080 {
1081 static const int mode_sense_valid[0x3f] = {
1082 [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1083 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1084 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1085 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1086 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1087 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1088 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1089 [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM),
1090 };
1091
1092 uint8_t *p = *p_outbuf + 2;
1093 int length;
1094
1095 assert(page < ARRAY_SIZE(mode_sense_valid));
1096 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1097 return -1;
1098 }
1099
1100 /*
1101 * If Changeable Values are requested, a mask denoting those mode parameters
1102 * that are changeable shall be returned. As we currently don't support
1103 * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
1105 *
1106 * The offsets here are off by two compared to the descriptions in the
1107 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1108 * but it is done so that offsets are consistent within our implementation
1109 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1110 * 2-byte and 4-byte headers.
1111 */
1112 switch (page) {
1113 case MODE_PAGE_HD_GEOMETRY:
1114 length = 0x16;
1115 if (page_control == 1) { /* Changeable Values */
1116 break;
1117 }
1118 /* if a geometry hint is available, use it */
1119 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1120 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1121 p[2] = s->qdev.conf.cyls & 0xff;
1122 p[3] = s->qdev.conf.heads & 0xff;
1123 /* Write precomp start cylinder, disabled */
1124 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1125 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1126 p[6] = s->qdev.conf.cyls & 0xff;
1127 /* Reduced current start cylinder, disabled */
1128 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1129 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1130 p[9] = s->qdev.conf.cyls & 0xff;
1131 /* Device step rate [ns], 200ns */
1132 p[10] = 0;
1133 p[11] = 200;
1134 /* Landing zone cylinder */
1135 p[12] = 0xff;
1136 p[13] = 0xff;
1137 p[14] = 0xff;
1138 /* Medium rotation rate [rpm], 5400 rpm */
1139 p[18] = (5400 >> 8) & 0xff;
1140 p[19] = 5400 & 0xff;
1141 break;
1142
1143 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1144 length = 0x1e;
1145 if (page_control == 1) { /* Changeable Values */
1146 break;
1147 }
1148 /* Transfer rate [kbit/s], 5Mbit/s */
1149 p[0] = 5000 >> 8;
1150 p[1] = 5000 & 0xff;
1151 /* if a geometry hint is available, use it */
1152 p[2] = s->qdev.conf.heads & 0xff;
1153 p[3] = s->qdev.conf.secs & 0xff;
1154 p[4] = s->qdev.blocksize >> 8;
1155 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1156 p[7] = s->qdev.conf.cyls & 0xff;
1157 /* Write precomp start cylinder, disabled */
1158 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1159 p[9] = s->qdev.conf.cyls & 0xff;
1160 /* Reduced current start cylinder, disabled */
1161 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1162 p[11] = s->qdev.conf.cyls & 0xff;
1163 /* Device step rate [100us], 100us */
1164 p[12] = 0;
1165 p[13] = 1;
1166 /* Device step pulse width [us], 1us */
1167 p[14] = 1;
1168 /* Device head settle delay [100us], 100us */
1169 p[15] = 0;
1170 p[16] = 1;
1171 /* Motor on delay [0.1s], 0.1s */
1172 p[17] = 1;
1173 /* Motor off delay [0.1s], 0.1s */
1174 p[18] = 1;
1175 /* Medium rotation rate [rpm], 5400 rpm */
1176 p[26] = (5400 >> 8) & 0xff;
1177 p[27] = 5400 & 0xff;
1178 break;
1179
1180 case MODE_PAGE_CACHING:
1181 length = 0x12;
1182 if (page_control == 1 || /* Changeable Values */
1183 blk_enable_write_cache(s->qdev.conf.blk)) {
1184 p[0] = 4; /* WCE */
1185 }
1186 break;
1187
1188 case MODE_PAGE_R_W_ERROR:
1189 length = 10;
1190 if (page_control == 1) { /* Changeable Values */
1191 break;
1192 }
1193 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1194 if (s->qdev.type == TYPE_ROM) {
1195 p[1] = 0x20; /* Read Retry Count */
1196 }
1197 break;
1198
1199 case MODE_PAGE_AUDIO_CTL:
1200 length = 14;
1201 break;
1202
1203 case MODE_PAGE_CAPABILITIES:
1204 length = 0x14;
1205 if (page_control == 1) { /* Changeable Values */
1206 break;
1207 }
1208
1209 p[0] = 0x3b; /* CD-R & CD-RW read */
1210 p[1] = 0; /* Writing not supported */
1211 p[2] = 0x7f; /* Audio, composite, digital out,
1212 mode 2 form 1&2, multi session */
1213 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1214 RW corrected, C2 errors, ISRC,
1215 UPC, Bar code */
1216 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1217 /* Locking supported, jumper present, eject, tray */
1218 p[5] = 0; /* no volume & mute control, no
1219 changer */
1220 p[6] = (50 * 176) >> 8; /* 50x read speed */
1221 p[7] = (50 * 176) & 0xff;
1222 p[8] = 2 >> 8; /* Two volume levels */
1223 p[9] = 2 & 0xff;
1224 p[10] = 2048 >> 8; /* 2M buffer */
1225 p[11] = 2048 & 0xff;
1226 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1227 p[13] = (16 * 176) & 0xff;
1228 p[16] = (16 * 176) >> 8; /* 16x write speed */
1229 p[17] = (16 * 176) & 0xff;
1230 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1231 p[19] = (16 * 176) & 0xff;
1232 break;
1233
1234 case MODE_PAGE_APPLE_VENDOR:
1235 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
1236 length = 0x1e;
1237 if (page_control == 1) { /* Changeable Values */
1238 break;
1239 }
1240
1241 memset(p, 0, length);
1242 strcpy((char *)p + 8, "APPLE COMPUTER, INC ");
1243 break;
1244 } else {
1245 return -1;
1246 }
1247
1248 case MODE_PAGE_VENDOR_SPECIFIC:
1249 if (s->qdev.type == TYPE_DISK && (s->quirks &
1250 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
1251 length = 0x2;
1252 if (page_control == 1) { /* Changeable Values */
1253 p[0] = 0xff;
1254 p[1] = 0xff;
1255 break;
1256 }
1257 p[0] = 0;
1258 p[1] = 0;
1259 break;
1260 } else {
1261 return -1;
1262 }
1263
1264 default:
1265 return -1;
1266 }
1267
1268 assert(length < 256);
1269 (*p_outbuf)[0] = page;
1270 (*p_outbuf)[1] = length;
1271 *p_outbuf += length + 2;
1272 return length + 2;
1273 }
1274
/*
 * Emulate MODE SENSE (6) and MODE SENSE (10).
 *
 * Builds the mode parameter header, an optional 8-byte block descriptor,
 * and the requested mode page(s) into outbuf.  Returns the number of
 * valid bytes in outbuf, or -1 after raising a check condition.
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    /* DBD = "disable block descriptors" bit of the CDB. */
    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly. */
        }
    } else {
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
            /* Use DBD from the request... */
            dev_specific_param = 0x00;

            /*
             * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
             * which should never return a block descriptor even though DBD is
             * not set, otherwise CDROM detection fails in MacOS
             */
            if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
                page == MODE_PAGE_APPLE_VENDOR) {
                dbd = true;
            }
        } else {
            /*
             * MMC prescribes that CD/DVD drives have no block descriptors,
             * and defines no device-specific parameter.
             */
            dev_specific_param = 0x00;
            dbd = true;
        }
    }

    /* Mode parameter header: 4 bytes for MODE SENSE (6), 8 for (10).
     * The mode data length field(s) are patched in at the end. */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length. */
        p += 8;
    }

    /* Emit one short-form block descriptor unless DBD suppressed it or
     * there is no medium. */
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        /* The short descriptor only has a 24-bit capacity field; larger
         * devices report 0 there. */
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* MODE_PAGE_ALLS: emit every implemented page; failures for
         * unimplemented pages are intentionally ignored. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}
1387
1388 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1389 {
1390 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1391 int start_track, format, msf, toclen;
1392 uint64_t nb_sectors;
1393
1394 msf = req->cmd.buf[1] & 2;
1395 format = req->cmd.buf[2] & 0xf;
1396 start_track = req->cmd.buf[6];
1397 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1398 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1399 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1400 switch (format) {
1401 case 0:
1402 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1403 break;
1404 case 1:
1405 /* multi session : only a single session defined */
1406 toclen = 12;
1407 memset(outbuf, 0, 12);
1408 outbuf[1] = 0x0a;
1409 outbuf[2] = 0x01;
1410 outbuf[3] = 0x01;
1411 break;
1412 case 2:
1413 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1414 break;
1415 default:
1416 return -1;
1417 }
1418 return toclen;
1419 }
1420
1421 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1422 {
1423 SCSIRequest *req = &r->req;
1424 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1425 bool start = req->cmd.buf[4] & 1;
1426 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1427 int pwrcnd = req->cmd.buf[4] & 0xf0;
1428
1429 if (pwrcnd) {
1430 /* eject/load only happens for power condition == 0 */
1431 return 0;
1432 }
1433
1434 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1435 if (!start && !s->tray_open && s->tray_locked) {
1436 scsi_check_condition(r,
1437 blk_is_inserted(s->qdev.conf.blk)
1438 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1439 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1440 return -1;
1441 }
1442
1443 if (s->tray_open != !start) {
1444 blk_eject(s->qdev.conf.blk, !start);
1445 s->tray_open = !start;
1446 }
1447 }
1448 return 0;
1449 }
1450
1451 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1452 {
1453 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1454 int buflen = r->iov.iov_len;
1455
1456 if (buflen) {
1457 trace_scsi_disk_emulate_read_data(buflen);
1458 r->iov.iov_len = 0;
1459 r->started = true;
1460 scsi_req_data(&r->req, buflen);
1461 return;
1462 }
1463
1464 /* This also clears the sense buffer for REQUEST SENSE. */
1465 scsi_req_complete(&r->req, GOOD);
1466 }
1467
/*
 * Validate a MODE SELECT page payload against the device's MODE SENSE
 * data: every bit the device reports as non-changeable must match the
 * current value.  Returns 0 if the payload is acceptable, -1 otherwise.
 *
 * @page: page code being selected
 * @inbuf: page data as sent by the initiator, WITHOUT the 2-byte page
 *         header
 * @inlen: length of inbuf
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    /* Render the page's current values; the payload length must match
     * exactly what MODE SENSE would report. */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* Render the changeable-bits mask (page_control == 1). */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        /* Skip the 2-byte header (i starts at 2); inbuf has no header,
         * hence the i - 2 offset. */
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
1511
1512 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1513 {
1514 switch (page) {
1515 case MODE_PAGE_CACHING:
1516 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1517 break;
1518
1519 default:
1520 break;
1521 }
1522 }
1523
/*
 * Walk the mode page list of a MODE SELECT parameter payload.
 *
 * @p/@len: payload after the parameter header and block descriptors
 * @change: false = validation pass (reject unchangeable bits),
 *          true = apply pass.  The caller runs both passes so that no
 *          change is made if any page is invalid.
 *
 * Returns 0 on success; on error raises the appropriate check
 * condition and returns -1.
 */
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers. */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            /* SPF=1: 4-byte sub-page format header. */
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            /* SPF=0: classic 2-byte header. */
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        /* Sub-pages are not implemented. */
        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}
1579
/*
 * Emulate MODE SELECT (6) and MODE SELECT (10).
 *
 * Parses the parameter header and block descriptors, then runs the
 * page list twice (validate, then apply) so a bad page leaves the
 * device unchanged.  When the write cache is disabled, applying the
 * pages is followed by an asynchronous flush; completion is signalled
 * from scsi_aio_complete in that case.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        if (!(s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            /* We only support PF=1, SP=0. */
            goto invalid_field;
        }
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Block descriptor length: byte 3 for the 6-byte form, a 16-bit
     * field at offset 6 for the 10-byte form. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    /* At most one short-form (8-byte) block descriptor is accepted;
     * its contents are ignored. */
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            /* Pass 1 only sees pages that pass 0 already accepted. */
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1645
1646 /* sector_num and nb_sectors expected to be in qdev blocksize */
1647 static inline bool check_lba_range(SCSIDiskState *s,
1648 uint64_t sector_num, uint32_t nb_sectors)
1649 {
1650 /*
1651 * The first line tests that no overflow happens when computing the last
1652 * sector. The second line tests that the last accessed sector is in
1653 * range.
1654 *
1655 * Careful, the computations should not underflow for nb_sectors == 0,
1656 * and a 0-block read to the first LBA beyond the end of device is
1657 * valid.
1658 */
1659 return (sector_num <= sector_num + nb_sectors &&
1660 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1661 }
1662
/* State for an in-flight UNMAP command.  The descriptors are processed
 * one at a time: scsi_unmap_complete_noio issues a discard for the next
 * descriptor and scsi_unmap_complete re-enters it when that discard
 * finishes. */
typedef struct UnmapCBData {
    SCSIDiskReq *r;     /* the UNMAP request being served */
    uint8_t *inbuf;     /* next 16-byte unmap block descriptor */
    int count;          /* descriptors remaining */
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);
1670
/*
 * Process the next UNMAP descriptor, or finish the request when none
 * remain.  Called both to kick off the command and (via
 * scsi_unmap_complete) after each discard completes; must not be
 * entered while an AIO operation is pending.  Consumes the request
 * reference and frees @data on the completion/error paths.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        /* Descriptor layout: 8-byte LBA followed by 4-byte block count,
         * both big-endian, in qdev blocksize units. */
        uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
        uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);

        if (!check_lba_range(s, sector_num, nb_sectors)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        /* Advance to the following descriptor before returning; the
         * discard completion re-enters this function through
         * scsi_unmap_complete. */
        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}
1710
/*
 * AIO completion callback for one UNMAP discard.  On error the request
 * is finished (and @data freed) by scsi_disk_req_check_error; otherwise
 * accounting is closed and the next descriptor is processed.  All of
 * this runs under the BlockBackend's AioContext lock.
 */
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        /* Error path: drop the reference taken in scsi_disk_emulate_unmap. */
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1730
/*
 * Emulate UNMAP.  Validates the parameter list header, then starts the
 * asynchronous per-descriptor discard chain via
 * scsi_unmap_complete_noio.  @inbuf holds the parameter list; its
 * first two 16-bit fields are the UNMAP data length (p[0]) and the
 * block descriptor data length (p[2]).
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    /* Transfer must cover the 8-byte header ... */
    if (len < 8) {
        goto invalid_param_len;
    }
    /* ... and both lengths declared in the header, ... */
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    /* ... which must be a whole number of 16-byte descriptors. */
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed. */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1781
/* State for an in-flight WRITE SAME that cannot use write-zeroes: the
 * pattern buffer is written out in chunks of up to SCSI_WRITE_SAME_MAX
 * bytes, with scsi_write_same_complete re-arming itself until
 * nb_sectors is exhausted. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;       /* the WRITE SAME request being served */
    int64_t sector;       /* next sector to write, in BDRV_SECTOR_SIZE units */
    int nb_sectors;       /* sectors remaining */
    QEMUIOVector qiov;    /* single-element iovec over iov */
    struct iovec iov;     /* pattern buffer (repeated logical block) */
} WriteSameCBData;
1789
/*
 * AIO completion callback for one WRITE SAME chunk.  Advances the
 * position, shrinks the final chunk if needed, and either submits the
 * next write or completes the request and frees @data.  The AioContext
 * lock is held across the body and released on both exit paths.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    /* Account for the chunk just written and clamp the next chunk to
     * what remains. */
    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1831
/*
 * Emulate WRITE SAME (10/16).  @inbuf holds one logical block of
 * pattern data.  An UNMAP request or an all-zero pattern is served with
 * a single write-zeroes operation; any other pattern is expanded into a
 * bounce buffer and written out in chunks by scsi_write_same_complete.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    /* Fast path: UNMAP bit set, or the pattern is all zeroes. */
    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                        BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    /* Slow path: replicate the pattern block into a bounce buffer of at
     * most SCSI_WRITE_SAME_MAX bytes and stream it out. */
    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    /* Reference dropped in scsi_write_same_complete when done. */
    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}
1893
1894 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1895 {
1896 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1897
1898 if (r->iov.iov_len) {
1899 int buflen = r->iov.iov_len;
1900 trace_scsi_disk_emulate_write_data(buflen);
1901 r->iov.iov_len = 0;
1902 scsi_req_data(&r->req, buflen);
1903 return;
1904 }
1905
1906 switch (req->cmd.buf[0]) {
1907 case MODE_SELECT:
1908 case MODE_SELECT_10:
1909 /* This also clears the sense buffer for REQUEST SENSE. */
1910 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1911 break;
1912
1913 case UNMAP:
1914 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1915 break;
1916
1917 case VERIFY_10:
1918 case VERIFY_12:
1919 case VERIFY_16:
1920 if (r->req.status == -1) {
1921 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1922 }
1923 break;
1924
1925 case WRITE_SAME_10:
1926 case WRITE_SAME_16:
1927 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1928 break;
1929
1930 default:
1931 abort();
1932 }
1933 }
1934
/*
 * Entry point for all emulated (non-DMA) commands.  Prepares the
 * response in r->iov and returns the transfer length: positive for
 * device-to-host, negative for host-to-device, 0 if the command was
 * completed (or failed) immediately.
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* The listed commands are valid without a medium; everything else
     * fails with NO MEDIUM up front. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        /* Medium presence was already verified above. */
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        /* LBA must be zero unless the PMI bit is set. */
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    /* The following write-direction commands only record a trace here;
     * the payload is handled later in scsi_disk_emulate_write_data. */
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
            req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    case FORMAT_UNIT:
        trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
2214
/* Execute a SCSI command. Returns the length of the data expected by the
   command. This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */
2219
/*
 * send_command callback for the DMA-capable opcodes (READ, WRITE,
 * WRITE AND VERIFY, and VERIFY with BYTCHK=01).  Validates the CDB and
 * the LBA range, records the transfer in r->sector/r->sector_count, and
 * returns the expected transfer size in bytes (negative for data-out).
 */
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    /* All DMA commands need a medium to operate on. */
    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    /* Transfer length in logical blocks, as encoded in the CDB. */
    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported. For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Convert logical blocks to 512-byte block-layer sectors. */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
            (command & 0xe) == 0xe ? "And Verify " : "",
            r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        /* Only the opcodes above are routed here by the dispatch table,
         * so anything else is a programming error. */
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    /* Zero-length transfers complete immediately with GOOD status. */
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
2306
2307 static void scsi_disk_reset(DeviceState *dev)
2308 {
2309 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2310 uint64_t nb_sectors;
2311
2312 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2313
2314 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2315 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2316 if (nb_sectors) {
2317 nb_sectors--;
2318 }
2319 s->qdev.max_lba = nb_sectors;
2320 /* reset tray statuses */
2321 s->tray_locked = 0;
2322 s->tray_open = 0;
2323
2324 s->qdev.scsi_version = s->qdev.default_scsi_version;
2325 }
2326
2327 static void scsi_disk_resize_cb(void *opaque)
2328 {
2329 SCSIDiskState *s = opaque;
2330
2331 /* SPC lists this sense code as available only for
2332 * direct-access devices.
2333 */
2334 if (s->qdev.type == TYPE_DISK) {
2335 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2336 }
2337 }
2338
2339 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2340 {
2341 SCSIDiskState *s = opaque;
2342
2343 /*
2344 * When a CD gets changed, we have to report an ejected state and
2345 * then a loaded state to guests so that they detect tray
2346 * open/close and media change events. Guests that do not use
2347 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2348 * states rely on this behavior.
2349 *
2350 * media_changed governs the state machine used for unit attention
2351 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2352 */
2353 s->media_changed = load;
2354 s->tray_open = !load;
2355 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2356 s->media_event = true;
2357 s->eject_request = false;
2358 }
2359
2360 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2361 {
2362 SCSIDiskState *s = opaque;
2363
2364 s->eject_request = true;
2365 if (force) {
2366 s->tray_locked = false;
2367 }
2368 }
2369
2370 static bool scsi_cd_is_tray_open(void *opaque)
2371 {
2372 return ((SCSIDiskState *)opaque)->tray_open;
2373 }
2374
2375 static bool scsi_cd_is_medium_locked(void *opaque)
2376 {
2377 return ((SCSIDiskState *)opaque)->tray_locked;
2378 }
2379
/* BlockBackend callbacks for removable devices (scsi-cd). */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};
2388
/* BlockBackend callbacks for fixed (non-removable) devices. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
2392
2393 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2394 {
2395 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2396 if (s->media_changed) {
2397 s->media_changed = false;
2398 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2399 }
2400 }
2401
/*
 * Common realize step shared by scsi-hd, scsi-cd and scsi-block:
 * validates the backing drive, applies block-layer configuration,
 * fills in default identification strings, and hooks up the
 * BlockBackend callbacks.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Only removable devices may start without a medium. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    /* CD-ROMs are read-only regardless of the backend's permissions. */
    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    /* -1 means "not configured by the user"; pick a default. */
    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    /* Default the INQUIRY identification strings that were not given. */
    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    /* /dev/sg* backends must use scsi-generic, not this device. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}
2486
/* Undo scsi_realize(): drop the boot-order geometry entry for this disk. */
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
2491
/*
 * Realize callback for scsi-hd: validates the configured block sizes
 * under the backend's AioContext lock, then runs the common realize.
 */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    /* Only release the AioContext lock if it was taken above. */
    if (ctx) {
        aio_context_release(ctx);
    }
}
2517
/*
 * Realize callback for scsi-cd: creates an empty anonymous backend if
 * no drive was given, then runs the common realize under the backend's
 * AioContext lock.
 */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    /* MMC block size; refined later if the guest reads capacity. */
    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}
2543
2544
/* Request ops for commands that are fully emulated in this file. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};
2553
/* Request ops for READ/WRITE-family commands serviced through the DMA
 * helpers and the block layer (migratable via load/save_request). */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2564
/*
 * Per-opcode dispatch.  Listed opcodes use the emulated or DMA request
 * ops; opcodes not listed fall back to scsi_disk_emulate_reqops in
 * scsi_new_request(), whose default case reports INVALID_OPCODE.
 */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]                 = &scsi_disk_emulate_reqops,
    [INQUIRY]                         = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                      = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                   = &scsi_disk_emulate_reqops,
    [START_STOP]                      = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]            = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]                = &scsi_disk_emulate_reqops,
    [READ_TOC]                        = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]              = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]           = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]               = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION]   = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]                = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]            = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                   = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]               = &scsi_disk_emulate_reqops,
    [SEEK_10]                         = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                     = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                  = &scsi_disk_emulate_reqops,
    [UNMAP]                           = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                   = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                   = &scsi_disk_emulate_reqops,
    [VERIFY_10]                       = &scsi_disk_emulate_reqops,
    [VERIFY_12]                       = &scsi_disk_emulate_reqops,
    [VERIFY_16]                       = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                     = &scsi_disk_emulate_reqops,

    [READ_6]                          = &scsi_disk_dma_reqops,
    [READ_10]                         = &scsi_disk_dma_reqops,
    [READ_12]                         = &scsi_disk_dma_reqops,
    [READ_16]                         = &scsi_disk_dma_reqops,
    [WRITE_6]                         = &scsi_disk_dma_reqops,
    [WRITE_10]                        = &scsi_disk_dma_reqops,
    [WRITE_12]                        = &scsi_disk_dma_reqops,
    [WRITE_16]                        = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]                 = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]                 = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]                 = &scsi_disk_dma_reqops,
};
2605
/*
 * Trace helper: render the CDB of a new request as " 0xNN 0xNN ..." and
 * emit the scsi_disk_new_request trace event.
 *
 * Use snprintf rather than sprintf so every write into the formatting
 * buffer is explicitly bounded; the buffer is sized for exactly one
 * " 0xNN" (5 characters) per CDB byte plus the terminating NUL.
 */
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int i;
    int len = scsi_cdb_length(buf);
    size_t buf_len;
    char *line_buffer, *p;

    assert(len > 0 && len <= 16);
    buf_len = len * 5 + 1;
    line_buffer = g_malloc(buf_len);

    for (i = 0, p = line_buffer; i < len; i++) {
        /* Bound each write to the space remaining in line_buffer. */
        p += snprintf(p, buf_len - (p - line_buffer), " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, line_buffer);

    g_free(line_buffer);
}
2622
2623 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2624 uint8_t *buf, void *hba_private)
2625 {
2626 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2627 SCSIRequest *req;
2628 const SCSIReqOps *ops;
2629 uint8_t command;
2630
2631 command = buf[0];
2632 ops = scsi_disk_reqops_dispatch[command];
2633 if (!ops) {
2634 ops = &scsi_disk_emulate_reqops;
2635 }
2636 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2637
2638 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2639 scsi_disk_new_request_dump(lun, tag, buf);
2640 }
2641
2642 return req;
2643 }
2644
2645 #ifdef __linux__
2646 static int get_device_type(SCSIDiskState *s)
2647 {
2648 uint8_t cmd[16];
2649 uint8_t buf[36];
2650 int ret;
2651
2652 memset(cmd, 0, sizeof(cmd));
2653 memset(buf, 0, sizeof(buf));
2654 cmd[0] = INQUIRY;
2655 cmd[4] = sizeof(buf);
2656
2657 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2658 buf, sizeof(buf), s->qdev.io_timeout);
2659 if (ret < 0) {
2660 return -1;
2661 }
2662 s->qdev.type = buf[0];
2663 if (buf[1] & 0x80) {
2664 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2665 }
2666 return 0;
2667 }
2668
/*
 * Realize callback for scsi-block: verifies the backing device speaks
 * SG_IO (sg driver v3+), reads its type via INQUIRY, then performs the
 * common scsi_realize step under the backend's AioContext lock.
 */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY. If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}
2732
/* Request state for scsi-block's DMA path: an SG_IO passthrough request
 * that still uses the QEMU DMA helpers for scatter/gather. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
    /* Completion callback (and its argument) supplied by the caller of
     * scsi_block_do_sgio(); invoked from scsi_block_sgio_complete(). */
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;
2745
/*
 * Completion callback for SG_IO requests issued by scsi_block_do_sgio().
 * Translates transport/driver/device status into either an immediately
 * completed request or a status passed on to the DMA-helper callback.
 */
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        /* Host (transport-level) errors fail the request outright. */
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        /* A driver-reported timeout is surfaced as BUSY status. */
        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            /* scsi_handle_rw_error() runs under the AioContext lock. */
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error. */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}
2782
/*
 * Issue one SG_IO request for a (possibly partial) data transfer.
 * Rebuilds a CDB of the smallest suitable size for this segment's LBA
 * and length, reusing the opcode, CDB byte 1 and group number that
 * scsi_block_dma_command() saved from the guest's original CDB.
 */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet. It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments. Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB (group 5); a 16-byte original stays 16 bytes. */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB (group 4); also used for LBAs beyond 32 bits. */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c. */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}
2869
/* scsi-block forwards CDB byte 1 (which carries FUA) to the real device
 * via SG_IO (see scsi_block_do_sgio), so FUA never needs emulation. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
2874
2875 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2876 QEMUIOVector *iov,
2877 BlockCompletionFunc *cb, void *cb_opaque,
2878 void *opaque)
2879 {
2880 SCSIBlockReq *r = opaque;
2881 return scsi_block_do_sgio(r, offset, iov,
2882 SG_DXFER_FROM_DEV, cb, cb_opaque);
2883 }
2884
2885 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2886 QEMUIOVector *iov,
2887 BlockCompletionFunc *cb, void *cb_opaque,
2888 void *opaque)
2889 {
2890 SCSIBlockReq *r = opaque;
2891 return scsi_block_do_sgio(r, offset, iov,
2892 SG_DXFER_TO_DEV, cb, cb_opaque);
2893 }
2894
2895 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2896 {
2897 switch (buf[0]) {
2898 case VERIFY_10:
2899 case VERIFY_12:
2900 case VERIFY_16:
2901 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2902 * for the number of logical blocks specified in the length
2903 * field). For other modes, do not use scatter/gather operation.
2904 */
2905 if ((buf[1] & 6) == 2) {
2906 return false;
2907 }
2908 break;
2909
2910 case READ_6:
2911 case READ_10:
2912 case READ_12:
2913 case READ_16:
2914 case WRITE_6:
2915 case WRITE_10:
2916 case WRITE_12:
2917 case WRITE_16:
2918 case WRITE_VERIFY_10:
2919 case WRITE_VERIFY_12:
2920 case WRITE_VERIFY_16:
2921 /* MMC writing cannot be done via DMA helpers, because it sometimes
2922 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2923 * We might use scsi_block_dma_reqops as long as no writing commands are
2924 * seen, but performance usually isn't paramount on optical media. So,
2925 * just make scsi-block operate the same as scsi-generic for them.
2926 */
2927 if (s->qdev.type != TYPE_ROM) {
2928 return false;
2929 }
2930 break;
2931
2932 default:
2933 break;
2934 }
2935
2936 return true;
2937 }
2938
2939
2940 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2941 {
2942 SCSIBlockReq *r = (SCSIBlockReq *)req;
2943 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2944
2945 r->cmd = req->cmd.buf[0];
2946 switch (r->cmd >> 5) {
2947 case 0:
2948 /* 6-byte CDB. */
2949 r->cdb1 = r->group_number = 0;
2950 break;
2951 case 1:
2952 /* 10-byte CDB. */
2953 r->cdb1 = req->cmd.buf[1];
2954 r->group_number = req->cmd.buf[6];
2955 break;
2956 case 4:
2957 /* 12-byte CDB. */
2958 r->cdb1 = req->cmd.buf[1];
2959 r->group_number = req->cmd.buf[10];
2960 break;
2961 case 5:
2962 /* 16-byte CDB. */
2963 r->cdb1 = req->cmd.buf[1];
2964 r->group_number = req->cmd.buf[14];
2965 break;
2966 default:
2967 abort();
2968 }
2969
2970 /* Protection information is not supported. For SCSI versions 2 and
2971 * older (as determined by snooping the guest's INQUIRY commands),
2972 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2973 */
2974 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2975 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2976 return 0;
2977 }
2978
2979 return scsi_disk_dma_command(req, buf);
2980 }
2981
/* Request ops for scsi-block's DMA path: CDBs are parsed here but
 * executed on the host device through SG_IO. */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2992
2993 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2994 uint32_t lun, uint8_t *buf,
2995 void *hba_private)
2996 {
2997 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2998
2999 if (scsi_block_is_passthrough(s, buf)) {
3000 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
3001 hba_private);
3002 } else {
3003 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
3004 hba_private);
3005 }
3006 }
3007
3008 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
3009 uint8_t *buf, void *hba_private)
3010 {
3011 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3012
3013 if (scsi_block_is_passthrough(s, buf)) {
3014 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
3015 } else {
3016 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
3017 }
3018 }
3019
/* Record how much sense data SG_IO wrote back, clamped to the size of
 * the request's sense buffer. */
static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}
3026 #endif
3027
3028 static
3029 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
3030 BlockCompletionFunc *cb, void *cb_opaque,
3031 void *opaque)
3032 {
3033 SCSIDiskReq *r = opaque;
3034 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3035 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3036 }
3037
3038 static
3039 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
3040 BlockCompletionFunc *cb, void *cb_opaque,
3041 void *opaque)
3042 {
3043 SCSIDiskReq *r = opaque;
3044 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3045 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3046 }
3047
3048 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
3049 {
3050 DeviceClass *dc = DEVICE_CLASS(klass);
3051 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3052
3053 dc->fw_name = "disk";
3054 dc->reset = scsi_disk_reset;
3055 sdc->dma_readv = scsi_dma_readv;
3056 sdc->dma_writev = scsi_dma_writev;
3057 sdc->need_fua_emulation = scsi_is_cmd_fua;
3058 }
3059
/* Abstract base type shared by scsi-hd, scsi-cd and scsi-block. */
static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};
3068
/* Properties common to the scsi-hd and scsi-cd flavours: the backing
 * drive, block-layer tunables and the INQUIRY identification strings. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),          \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),           \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3078
3079
/* qdev properties for scsi-hd. */
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    /* Default SCSI version 5 = SPC-3. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
3102
/* Migration state shared by all scsi-disk variants: the generic SCSI
 * device state plus the removable-medium/tray flags. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};
3117
3118 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
3119 {
3120 DeviceClass *dc = DEVICE_CLASS(klass);
3121 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3122
3123 sc->realize = scsi_hd_realize;
3124 sc->unrealize = scsi_unrealize;
3125 sc->alloc_req = scsi_new_request;
3126 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3127 dc->desc = "virtual SCSI disk";
3128 device_class_set_props(dc, scsi_hd_properties);
3129 dc->vmsd = &vmstate_scsi_disk_state;
3130 }
3131
/* QOM type registration for scsi-hd. */
static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};
3137
/* qdev properties for scsi-cd. */
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* Default SCSI version 5 = SPC-3. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_END_OF_LIST(),
};
3156
3157 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3158 {
3159 DeviceClass *dc = DEVICE_CLASS(klass);
3160 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3161
3162 sc->realize = scsi_cd_realize;
3163 sc->alloc_req = scsi_new_request;
3164 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3165 dc->desc = "virtual SCSI CD-ROM";
3166 device_class_set_props(dc, scsi_cd_properties);
3167 dc->vmsd = &vmstate_scsi_disk_state;
3168 }
3169
/* QOM type registration for scsi-cd. */
static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};
3175
3176 #ifdef __linux__
/* qdev properties for scsi-block (Linux SG_IO passthrough). */
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* -1: snoop the SCSI version from the real device's INQUIRY data. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
3192
3193 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3194 {
3195 DeviceClass *dc = DEVICE_CLASS(klass);
3196 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3197 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3198
3199 sc->realize = scsi_block_realize;
3200 sc->alloc_req = scsi_block_new_request;
3201 sc->parse_cdb = scsi_block_parse_cdb;
3202 sdc->dma_readv = scsi_block_dma_readv;
3203 sdc->dma_writev = scsi_block_dma_writev;
3204 sdc->update_sense = scsi_block_update_sense;
3205 sdc->need_fua_emulation = scsi_block_no_fua;
3206 dc->desc = "SCSI block device passthrough";
3207 device_class_set_props(dc, scsi_block_properties);
3208 dc->vmsd = &vmstate_scsi_disk_state;
3209 }
3210
/* QOM type registration for scsi-block (Linux only). */
static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
3216 #endif
3217
/* Register all scsi-disk QOM types with the type system at startup. */
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    /* scsi-block requires the Linux SG_IO interface. */
    type_register_static(&scsi_block_info);
#endif
}
3227
3228 type_init(scsi_disk_register_types)