/* hw/scsi/scsi-disk.c (QEMU, via git.proxmox.com mirror_qemu.git) */
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
43 #include "trace.h"
44 #include "qom/object.h"
45
/*
 * __linux__ is the standard-conforming predefined macro; the bare
 * "__linux" spelling is a legacy GNU alias that is not defined in
 * strict ISO modes, so the SG_IO header would silently be skipped.
 */
#ifdef __linux__
#include <scsi/sg.h>
#endif
49
/* Upper bound on a single emulated WRITE SAME data transfer. */
#define SCSI_WRITE_SAME_MAX (512 * KiB)
/* Size of the bounce buffer used when the HBA does not pass an sglist. */
#define SCSI_DMA_BUF_SIZE (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_MAX_MODE_LEN 256

/* Defaults advertised in the Block Limits VPD page (0xb0). */
#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
#define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE "scsi-disk-base"

/* Declares SCSIDiskState/SCSIDiskClass and the SCSI_DISK_BASE() cast macro. */
OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
62
struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /* I/O callbacks used by scsi_do_read/scsi_write_data; subclasses
     * override these to route I/O through a different backend path. */
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    /* Return true if FUA must be emulated with an explicit flush. */
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    /* Refresh the request's sense buffer (used on CHECK CONDITION). */
    void            (*update_sense)(SCSIRequest *r);
};

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;            /* allocated size of iov.iov_base, bytes */
    bool started;               /* first data chunk has been issued */
    bool need_fua_emulation;    /* flush after write to emulate FUA bit */
    struct iovec iov;           /* single-element bounce buffer */
    QEMUIOVector qiov;          /* wraps iov for the block layer */
    BlockAcctCookie acct;       /* block statistics accounting cookie */
} SCSIDiskReq;

/* Bits in SCSIDiskState.features */
#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;          /* SCSI_DISK_F_* bit mask */
    bool media_changed;
    bool media_event;           /* pending GESN "new media" event */
    bool eject_request;         /* pending GESN "eject requested" event */
    uint16_t port_index;        /* relative target port, VPD page 0x83 */
    uint64_t max_unmap_size;    /* advertised in VPD page 0xb0, bytes */
    uint64_t max_io_size;       /* advertised in VPD page 0xb0, bytes */
    QEMUBH *bh;
    char *version;              /* INQUIRY product revision, 4 chars used */
    char *serial;               /* VPD page 0x80 unit serial number */
    char *vendor;               /* INQUIRY vendor id, 8 chars */
    char *product;              /* INQUIRY product id, 16 chars */
    char *device_id;            /* VPD page 0x83 ASCII designator */
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xffe  - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};
114
/* SCSIReqOps.free_req: release the bounce buffer owned by the request. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    /* qemu_vfree(NULL) is a no-op, so an unallocated buffer is fine. */
    qemu_vfree(r->iov.iov_base);
}
121
/*
 * Helper function for command completion with sense: attach @sense to the
 * request and complete it with CHECK CONDITION status.
 */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}
130
/*
 * Lazily allocate the request's bounce buffer (@size bytes, only on first
 * call) and point the single-element QEMUIOVector at it.  The iov length
 * is capped to the remaining transfer so the final chunk is exact.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
142
/*
 * Migration: serialize in-flight request state.  The wire format must stay
 * in sync with scsi_disk_load_request below.
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            /* Write direction: buffer holds guest data, save it all. */
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            /* Read direction, not being retried: save the partial data
             * together with its length. */
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}
160
/*
 * Migration: restore in-flight request state saved by
 * scsi_disk_save_request; field order must match exactly.
 */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        /* Re-allocate the bounce buffer at the saved size. */
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* Guard against a corrupted stream overflowing the buffer. */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
183
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 *
 * @ret < 0 is a host errno; @ret >= 0 is a nonzero SCSI status from a
 * passthrough command.  @acct_failed says whether a failed-I/O accounting
 * record should be emitted when the error is reported to the guest.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Host error: derive a SCSI status + sense from the errno. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status. */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* Passthrough: let the subclass refresh the sense buffer. */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* VM is stopped; queue the request for retry on resume. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}
255
256 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
257 {
258 if (r->req.io_canceled) {
259 scsi_req_cancel_complete(&r->req);
260 return true;
261 }
262
263 if (ret < 0) {
264 return scsi_handle_rw_error(r, ret, acct_failed);
265 }
266
267 return false;
268 }
269
/*
 * Generic AIO completion callback: finish accounting and complete the
 * request with GOOD status unless it was cancelled or hit an error.
 * Drops the reference taken when the AIO was issued.
 */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    /* Completion may run outside the BlockBackend's AioContext. */
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}
289
290 static bool scsi_is_cmd_fua(SCSICommand *cmd)
291 {
292 switch (cmd->buf[0]) {
293 case READ_10:
294 case READ_12:
295 case READ_16:
296 case WRITE_10:
297 case WRITE_12:
298 case WRITE_16:
299 return (cmd->buf[1] & 8) != 0;
300
301 case VERIFY_10:
302 case VERIFY_12:
303 case VERIFY_16:
304 case WRITE_VERIFY_10:
305 case WRITE_VERIFY_12:
306 case WRITE_VERIFY_16:
307 return true;
308
309 case READ_6:
310 case WRITE_6:
311 default:
312 return false;
313 }
314 }
315
/*
 * Final step of a write: if the FUA bit must be emulated, issue a flush
 * (scsi_aio_complete will then finish the request); otherwise complete
 * immediately.  Consumes the caller's reference in the non-flush path.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        /* The flush inherits the reference; scsi_aio_complete drops it. */
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}
333
/*
 * Completion of a full sglist DMA transfer (no bounce-buffer chunking).
 * Marks the whole range transferred, then finishes via the FUA path for
 * writes or directly for reads.  Drops one request reference.
 */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The whole transfer was submitted in one go. */
    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* scsi_write_do_fua consumes the reference itself. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
353
/*
 * AIO callback for dma_blk_io: record accounting, then delegate to
 * scsi_dma_complete_noio inside the BlockBackend's AioContext.
 */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
371
/*
 * Completion of one bounce-buffer read chunk: advance the cursor and hand
 * the data to the HBA, which will call scsi_read_data again for the next
 * chunk.  Drops one request reference.
 */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    /* Push this chunk to the HBA; it re-enters via scsi_read_data. */
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}
389
/*
 * AIO callback for a bounce-buffer read: record accounting, then continue
 * in scsi_read_complete_noio under the BlockBackend's AioContext.
 */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
408
/*
 * Actually issue a read to the block device, either straight into the
 * HBA-provided sglist (dma_blk_io) or via the bounce buffer in chunks.
 * @ret carries the status of a preceding flush (FUA emulation), if any.
 */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* Zero-copy path: DMA directly into the guest sglist. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* Bounce-buffer path: read one SCSI_DMA_BUF_SIZE chunk. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    /* Balances the caller's reference (the AIO holds its own). */
    scsi_req_unref(&r->req);
}
442
/*
 * Completion of the pre-read flush issued for FUA emulation: account the
 * flush, then proceed with the actual read in scsi_do_read.
 */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
460
/*
 * SCSIReqOps.read_data: read more data from the scsi device into the
 * buffer.  Called by the HBA once per chunk; completes the request when
 * sector_count reaches zero.  On the first call with FUA emulation, a
 * flush is issued before the read so previously-written data is stable.
 */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* A write command must not reach the read path. */
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* Flush before the first chunk; scsi_do_read_cb continues. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
501
/*
 * Completion of one write chunk: advance the cursor; when done, run the
 * FUA tail, otherwise ask the HBA for the next chunk of data.  Drops one
 * request reference (except via scsi_write_do_fua, which consumes it).
 */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        /* Request the next chunk; HBA re-enters via scsi_write_data. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}
526
/*
 * AIO callback for a bounce-buffer write: record accounting, then continue
 * in scsi_write_complete_noio under the BlockBackend's AioContext.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
544
/*
 * SCSIReqOps.write_data: submit the data the HBA has produced so far,
 * either directly from the sglist or from the bounce buffer.  VERIFY
 * commands only pretend to write (BYTCHK data is accepted and dropped).
 */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY: no actual I/O, just step the completion machinery. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* Zero-copy path: DMA directly from the guest sglist. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        /* Bounce-buffer path: write the chunk the HBA just filled. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}
598
599 /* Return a pointer to the data buffer. */
600 static uint8_t *scsi_get_buf(SCSIRequest *req)
601 {
602 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
603
604 return (uint8_t *)r->iov.iov_base;
605 }
606
/*
 * Build the INQUIRY EVPD response for the page selected by CDB byte 2.
 * Returns the number of bytes filled into @outbuf, or -1 if the page is
 * not supported for this device (the caller reports INVALID_FIELD).
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* 4-byte VPD header; the page length byte at outbuf[start - 1] is
     * patched in after the payload has been assembled (see bottom). */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        /* Cap the serial number at 36 characters. */
        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        /* Leave room for the 4-byte designator headers within the
         * one-byte page length limit. */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            /* Port number lives in the low 2 bytes of the 4-byte field. */
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        /* All limits are converted from bytes to logical blocks. */
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            /* Clamp to the host backend's transfer limit, if any. */
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        /* Fixed-size page: buflen is set outright, not accumulated. */
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}
761
/*
 * Emulate the INQUIRY command: either dispatch to the VPD page builder
 * (EVPD bit set) or fill in the standard INQUIRY data.  Returns the
 * response length or -1 for an unsupported request.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        /* PAGE CODE must be zero when EVPD is not set. */
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    /* Fixed-width, space-padded vendor (8) and product (16) fields. */
    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
               the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}
812
813 static inline bool media_is_dvd(SCSIDiskState *s)
814 {
815 uint64_t nb_sectors;
816 if (s->qdev.type != TYPE_ROM) {
817 return false;
818 }
819 if (!blk_is_available(s->qdev.conf.blk)) {
820 return false;
821 }
822 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
823 return nb_sectors > CD_MAX_SECTORS;
824 }
825
826 static inline bool media_is_cd(SCSIDiskState *s)
827 {
828 uint64_t nb_sectors;
829 if (s->qdev.type != TYPE_ROM) {
830 return false;
831 }
832 if (!blk_is_available(s->qdev.conf.blk)) {
833 return false;
834 }
835 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
836 return nb_sectors <= CD_MAX_SECTORS;
837 }
838
/*
 * Emulate READ DISC INFORMATION (MMC): report a single finalized session
 * with one track.  Returns 34 (response length) or -1 on error; sets
 * INVALID_FIELD sense itself for unsupported data types.
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}
871
/*
 * Emulate READ DVD STRUCTURE (MMC).  Supports format 0x00 (physical
 * format information) and 0xff (list of capabilities); formats 0x01 and
 * 0x04 return all-zero payloads.  Returns the response length or -1,
 * setting sense itself for media/format mismatches.
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response sizes (payload + 4-byte header) per supported format. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        /* All concrete formats require a DVD medium to be present. */
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        /* Sector numbers are in 2048-byte DVD blocks (>> 2 from 512). */
        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            /* One 4-byte entry per supported format. */
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
965
966 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
967 {
968 uint8_t event_code, media_status;
969
970 media_status = 0;
971 if (s->tray_open) {
972 media_status = MS_TRAY_OPEN;
973 } else if (blk_is_inserted(s->qdev.conf.blk)) {
974 media_status = MS_MEDIA_PRESENT;
975 }
976
977 /* Event notification descriptor */
978 event_code = MEC_NO_CHANGE;
979 if (media_status != MS_TRAY_OPEN) {
980 if (s->media_event) {
981 event_code = MEC_NEW_MEDIA;
982 s->media_event = false;
983 } else if (s->eject_request) {
984 event_code = MEC_EJECT_REQUESTED;
985 s->eject_request = false;
986 }
987 }
988
989 outbuf[0] = event_code;
990 outbuf[1] = media_status;
991
992 /* These fields are reserved, just clear them. */
993 outbuf[2] = 0;
994 outbuf[3] = 0;
995 return 4;
996 }
997
/*
 * Emulate GET EVENT STATUS NOTIFICATION (MMC).  Only the polled mode and
 * the media event class are supported.  Returns the response length or
 * -1 for unsupported requests.
 */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        /* No supported class requested: NEA bit set, empty response. */
        outbuf[2] = 0x80;
    }
    /* Event data length, not counting the 4-byte header itself. */
    stw_be_p(outbuf, size - 4);
    return size;
}
1024
/*
 * Emulate GET CONFIGURATION (MMC): advertise DVD-ROM and CD-ROM profiles
 * plus the Core and Removable Media features, and report which profile
 * matches the currently-inserted medium.  Returns 40 or -1.
 */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}
1066
1067 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1068 {
1069 if (s->qdev.type != TYPE_ROM) {
1070 return -1;
1071 }
1072 memset(outbuf, 0, 8);
1073 outbuf[5] = 1; /* CD-ROM */
1074 return 8;
1075 }
1076
1077 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1078 int page_control)
1079 {
1080 static const int mode_sense_valid[0x3f] = {
1081 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1082 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1083 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1084 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1085 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1086 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1087 };
1088
1089 uint8_t *p = *p_outbuf + 2;
1090 int length;
1091
1092 assert(page < ARRAY_SIZE(mode_sense_valid));
1093 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1094 return -1;
1095 }
1096
1097 /*
1098 * If Changeable Values are requested, a mask denoting those mode parameters
1099 * that are changeable shall be returned. As we currently don't support
1100 * parameter changes via MODE_SELECT all bits are returned set to zero.
1101  * The buffer was already memset to zero by the caller of this function.
1102 *
1103 * The offsets here are off by two compared to the descriptions in the
1104 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1105 * but it is done so that offsets are consistent within our implementation
1106 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1107 * 2-byte and 4-byte headers.
1108 */
1109 switch (page) {
1110 case MODE_PAGE_HD_GEOMETRY:
1111 length = 0x16;
1112 if (page_control == 1) { /* Changeable Values */
1113 break;
1114 }
1115 /* if a geometry hint is available, use it */
1116 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1117 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1118 p[2] = s->qdev.conf.cyls & 0xff;
1119 p[3] = s->qdev.conf.heads & 0xff;
1120 /* Write precomp start cylinder, disabled */
1121 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1122 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1123 p[6] = s->qdev.conf.cyls & 0xff;
1124 /* Reduced current start cylinder, disabled */
1125 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1126 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1127 p[9] = s->qdev.conf.cyls & 0xff;
1128 /* Device step rate [ns], 200ns */
1129 p[10] = 0;
1130 p[11] = 200;
1131 /* Landing zone cylinder */
1132 p[12] = 0xff;
1133 p[13] = 0xff;
1134 p[14] = 0xff;
1135 /* Medium rotation rate [rpm], 5400 rpm */
1136 p[18] = (5400 >> 8) & 0xff;
1137 p[19] = 5400 & 0xff;
1138 break;
1139
1140 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1141 length = 0x1e;
1142 if (page_control == 1) { /* Changeable Values */
1143 break;
1144 }
1145 /* Transfer rate [kbit/s], 5Mbit/s */
1146 p[0] = 5000 >> 8;
1147 p[1] = 5000 & 0xff;
1148 /* if a geometry hint is available, use it */
1149 p[2] = s->qdev.conf.heads & 0xff;
1150 p[3] = s->qdev.conf.secs & 0xff;
1151 p[4] = s->qdev.blocksize >> 8;
1152 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1153 p[7] = s->qdev.conf.cyls & 0xff;
1154 /* Write precomp start cylinder, disabled */
1155 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1156 p[9] = s->qdev.conf.cyls & 0xff;
1157 /* Reduced current start cylinder, disabled */
1158 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1159 p[11] = s->qdev.conf.cyls & 0xff;
1160 /* Device step rate [100us], 100us */
1161 p[12] = 0;
1162 p[13] = 1;
1163 /* Device step pulse width [us], 1us */
1164 p[14] = 1;
1165 /* Device head settle delay [100us], 100us */
1166 p[15] = 0;
1167 p[16] = 1;
1168 /* Motor on delay [0.1s], 0.1s */
1169 p[17] = 1;
1170 /* Motor off delay [0.1s], 0.1s */
1171 p[18] = 1;
1172 /* Medium rotation rate [rpm], 5400 rpm */
1173 p[26] = (5400 >> 8) & 0xff;
1174 p[27] = 5400 & 0xff;
1175 break;
1176
1177 case MODE_PAGE_CACHING:
1178 length = 0x12;
1179 if (page_control == 1 || /* Changeable Values */
1180 blk_enable_write_cache(s->qdev.conf.blk)) {
1181 p[0] = 4; /* WCE */
1182 }
1183 break;
1184
1185 case MODE_PAGE_R_W_ERROR:
1186 length = 10;
1187 if (page_control == 1) { /* Changeable Values */
1188 break;
1189 }
1190 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1191 if (s->qdev.type == TYPE_ROM) {
1192 p[1] = 0x20; /* Read Retry Count */
1193 }
1194 break;
1195
1196 case MODE_PAGE_AUDIO_CTL:
1197 length = 14;
1198 break;
1199
1200 case MODE_PAGE_CAPABILITIES:
1201 length = 0x14;
1202 if (page_control == 1) { /* Changeable Values */
1203 break;
1204 }
1205
1206 p[0] = 0x3b; /* CD-R & CD-RW read */
1207 p[1] = 0; /* Writing not supported */
1208 p[2] = 0x7f; /* Audio, composite, digital out,
1209 mode 2 form 1&2, multi session */
1210 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1211 RW corrected, C2 errors, ISRC,
1212 UPC, Bar code */
1213 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1214 /* Locking supported, jumper present, eject, tray */
1215 p[5] = 0; /* no volume & mute control, no
1216 changer */
1217 p[6] = (50 * 176) >> 8; /* 50x read speed */
1218 p[7] = (50 * 176) & 0xff;
1219 p[8] = 2 >> 8; /* Two volume levels */
1220 p[9] = 2 & 0xff;
1221 p[10] = 2048 >> 8; /* 2M buffer */
1222 p[11] = 2048 & 0xff;
1223 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1224 p[13] = (16 * 176) & 0xff;
1225 p[16] = (16 * 176) >> 8; /* 16x write speed */
1226 p[17] = (16 * 176) & 0xff;
1227 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1228 p[19] = (16 * 176) & 0xff;
1229 break;
1230
1231 default:
1232 return -1;
1233 }
1234
1235 assert(length < 256);
1236 (*p_outbuf)[0] = page;
1237 (*p_outbuf)[1] = length;
1238 *p_outbuf += length + 2;
1239 return length + 2;
1240 }
1241
/*
 * Emulate MODE SENSE(6) and MODE SENSE(10).
 *
 * Builds the 4- or 8-byte mode parameter header, an optional 8-byte
 * short block descriptor, and the requested page(s) (0x3f = all pages)
 * in @outbuf.  Returns the length of the reply in bytes, or -1 after
 * raising a check condition.
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    /* DBD = 1 suppresses the block descriptor. */
    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    /* Mode parameter header; the mode data length (byte 0, or bytes 0-1
     * for the 10-byte form) is filled in at the end. */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        if (nb_sectors > 0xffffff) {
            /* The short block descriptor has only 24 bits for the count;
             * 0 means "capacity not reportable here". */
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* Return all supported pages; unsupported ones are skipped. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}
1337
1338 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1339 {
1340 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1341 int start_track, format, msf, toclen;
1342 uint64_t nb_sectors;
1343
1344 msf = req->cmd.buf[1] & 2;
1345 format = req->cmd.buf[2] & 0xf;
1346 start_track = req->cmd.buf[6];
1347 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1348 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1349 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1350 switch (format) {
1351 case 0:
1352 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1353 break;
1354 case 1:
1355 /* multi session : only a single session defined */
1356 toclen = 12;
1357 memset(outbuf, 0, 12);
1358 outbuf[1] = 0x0a;
1359 outbuf[2] = 0x01;
1360 outbuf[3] = 0x01;
1361 break;
1362 case 2:
1363 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1364 break;
1365 default:
1366 return -1;
1367 }
1368 return toclen;
1369 }
1370
1371 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1372 {
1373 SCSIRequest *req = &r->req;
1374 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1375 bool start = req->cmd.buf[4] & 1;
1376 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1377 int pwrcnd = req->cmd.buf[4] & 0xf0;
1378
1379 if (pwrcnd) {
1380 /* eject/load only happens for power condition == 0 */
1381 return 0;
1382 }
1383
1384 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1385 if (!start && !s->tray_open && s->tray_locked) {
1386 scsi_check_condition(r,
1387 blk_is_inserted(s->qdev.conf.blk)
1388 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1389 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1390 return -1;
1391 }
1392
1393 if (s->tray_open != !start) {
1394 blk_eject(s->qdev.conf.blk, !start);
1395 s->tray_open = !start;
1396 }
1397 }
1398 return 0;
1399 }
1400
1401 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1402 {
1403 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1404 int buflen = r->iov.iov_len;
1405
1406 if (buflen) {
1407 trace_scsi_disk_emulate_read_data(buflen);
1408 r->iov.iov_len = 0;
1409 r->started = true;
1410 scsi_req_data(&r->req, buflen);
1411 return;
1412 }
1413
1414 /* This also clears the sense buffer for REQUEST SENSE. */
1415 scsi_req_complete(&r->req, GOOD);
1416 }
1417
/*
 * Validate one MODE SELECT page payload (@inbuf, @inlen bytes, page
 * header already stripped) against what MODE SENSE would report for
 * @page.  Returns 0 if every bit the device reports as unchangeable
 * matches the current value, -1 otherwise (unknown page, size
 * mismatch, or an attempt to change an unchangeable bit).
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    /* Current values (page_control == 0). */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* Changeable-bits mask (page_control == 1). */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        /* inbuf lacks the 2-byte header, hence the i - 2 offset. */
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
1461
1462 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1463 {
1464 switch (page) {
1465 case MODE_PAGE_CACHING:
1466 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1467 break;
1468
1469 default:
1470 break;
1471 }
1472 }
1473
/*
 * Walk the list of mode pages in a MODE SELECT parameter list
 * (@p, @len bytes, headers and block descriptor already stripped).
 * With @change false only validate the pages; with @change true apply
 * them.  Returns 0 on success, -1 after raising a check condition.
 */
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            /* SPF bit set: 4-byte sub-page format header. */
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            /* Page_0 format: 2-byte header, no subpage. */
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        /* Subpages are not implemented. */
        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            /* Validation pass: only changeable bits may differ. */
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}
1529
/*
 * Emulate MODE SELECT(6)/(10).  The parameter list in @inbuf is
 * processed in two passes (validate, then apply) so that no change is
 * made if any page is invalid.  If the write cache ends up disabled, a
 * flush is issued and the request completes from its AIO callback;
 * otherwise the request completes here with GOOD status.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Skip the mode parameter header and the (optional, 0- or 8-byte)
     * block descriptor; only the page list is processed. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* Write cache is now disabled: flush so previously cached data
         * reaches the medium before reporting success. */
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1592
1593 /* sector_num and nb_sectors expected to be in qdev blocksize */
1594 static inline bool check_lba_range(SCSIDiskState *s,
1595 uint64_t sector_num, uint32_t nb_sectors)
1596 {
1597 /*
1598 * The first line tests that no overflow happens when computing the last
1599 * sector. The second line tests that the last accessed sector is in
1600 * range.
1601 *
1602 * Careful, the computations should not underflow for nb_sectors == 0,
1603 * and a 0-block read to the first LBA beyond the end of device is
1604 * valid.
1605 */
1606 return (sector_num <= sector_num + nb_sectors &&
1607 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1608 }
1609
/* Per-request state for walking a chain of UNMAP block descriptors. */
typedef struct UnmapCBData {
    SCSIDiskReq *r;     /* request being served; a ref is held while active */
    uint8_t *inbuf;     /* next 16-byte UNMAP block descriptor */
    int count;          /* number of descriptors still to process */
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);
1617
/*
 * Issue the discard for the next UNMAP descriptor, or complete the
 * request once all descriptors have been consumed.  Called once to
 * start the chain and again (via scsi_unmap_complete) after each
 * discard finishes.  On any path that does not start another
 * asynchronous discard, the request reference taken by the caller is
 * dropped and @data is freed.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        /* Descriptor layout: 8-byte LBA, 4-byte block count, both in
         * qdev blocksize units. */
        uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
        uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);

        if (!check_lba_range(s, sector_num, nb_sectors)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        /* scsi_unmap_complete re-enters this function for the rest. */
        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}
1657
/* AIO callback for one discard started by scsi_unmap_complete_noio. */
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        /* Error reported (or request canceled): drop the ref taken in
         * scsi_disk_emulate_unmap and free the walk state. */
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        /* Continue with the next descriptor, if any. */
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1677
/*
 * Emulate UNMAP.  Validates the parameter list in @inbuf and hands its
 * 16-byte block descriptors to scsi_unmap_complete_noio, which
 * discards each range asynchronously.
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    /* p[0..1]: UNMAP data length; must fit within the transfer. */
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    /* p[2..3]: block descriptor data length; must fit and be a
     * multiple of the 16-byte descriptor size. */
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];            /* descriptors follow the 8-byte header */
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1728
/* Per-request state for a WRITE SAME implemented as chunked writes. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;     /* request being served */
    int64_t sector;     /* next sector to write, in 512-byte units */
    int nb_sectors;     /* sectors still to write, in 512-byte units */
    QEMUIOVector qiov;  /* single-element I/O vector wrapping iov */
    struct iovec iov;   /* bounce buffer holding the repeated pattern */
} WriteSameCBData;
1736
/*
 * AIO callback for one chunk of a buffered WRITE SAME.  Advances the
 * cursor and issues the next write until data->nb_sectors is
 * exhausted, then completes the request and frees @data.  The AIO
 * context is held for the whole callback and released on both the
 * resubmit and the completion path.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    /* Advance past the chunk just written; the final chunk may be
     * smaller than the bounce buffer. */
    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1778
/*
 * Emulate WRITE SAME(10)/(16).  An all-zero pattern (or the NDOB bit)
 * is implemented as an efficient write-zeroes operation; any other
 * pattern is replicated into a bounce buffer and written out in
 * chunks of up to SCSI_WRITE_SAME_MAX bytes by
 * scsi_write_same_complete.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    /* NDOB (no data-out buffer) set, or the pattern is all zeroes:
     * write zeroes, honoring the UNMAP bit as "may discard". */
    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                        BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Replicate the single-block pattern across the bounce buffer. */
    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}
1840
1841 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1842 {
1843 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1844
1845 if (r->iov.iov_len) {
1846 int buflen = r->iov.iov_len;
1847 trace_scsi_disk_emulate_write_data(buflen);
1848 r->iov.iov_len = 0;
1849 scsi_req_data(&r->req, buflen);
1850 return;
1851 }
1852
1853 switch (req->cmd.buf[0]) {
1854 case MODE_SELECT:
1855 case MODE_SELECT_10:
1856 /* This also clears the sense buffer for REQUEST SENSE. */
1857 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1858 break;
1859
1860 case UNMAP:
1861 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1862 break;
1863
1864 case VERIFY_10:
1865 case VERIFY_12:
1866 case VERIFY_16:
1867 if (r->req.status == -1) {
1868 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1869 }
1870 break;
1871
1872 case WRITE_SAME_10:
1873 case WRITE_SAME_16:
1874 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1875 break;
1876
1877 default:
1878 abort();
1879 }
1880 }
1881
/*
 * Process an emulated (non-DMA) SCSI command.  Builds the reply in the
 * request's bounce buffer and returns the non-negative number of bytes
 * to transfer to the initiator, a negative byte count for commands
 * that expect parameter data from the initiator, or 0 if the request
 * has already been completed (error, or asynchronous completion).
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* Commands in the first group are allowed without a medium present;
     * everything else fails early with NO MEDIUM. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        /* Availability was already checked above (not in the first group). */
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        /* Third-party reservations are not supported. */
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        /* PMI=0 requires LBA=0. */
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            /* PMI=0 requires LBA=0. */
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
            req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    /* Negative return for commands that receive parameter data. */
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
2158
/* Execute a SCSI command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk
   writes), and zero if the command does not transfer any data.  */
2163
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    /* DMA commands need a medium; fail with NOT READY sense otherwise. */
    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    /* Transfer length from the CDB, in logical blocks. */
    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported. For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Convert LBA/length from logical blocks to 512-byte sectors. */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        /* Dispatch table guarantees only the opcodes above reach us. */
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    /* Zero-length transfers are complete right away. */
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    /* Negative size means a transfer to the device (see header comment). */
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
2250
2251 static void scsi_disk_reset(DeviceState *dev)
2252 {
2253 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2254 uint64_t nb_sectors;
2255
2256 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2257
2258 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2259 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2260 if (nb_sectors) {
2261 nb_sectors--;
2262 }
2263 s->qdev.max_lba = nb_sectors;
2264 /* reset tray statuses */
2265 s->tray_locked = 0;
2266 s->tray_open = 0;
2267
2268 s->qdev.scsi_version = s->qdev.default_scsi_version;
2269 }
2270
2271 static void scsi_disk_resize_cb(void *opaque)
2272 {
2273 SCSIDiskState *s = opaque;
2274
2275 /* SPC lists this sense code as available only for
2276 * direct-access devices.
2277 */
2278 if (s->qdev.type == TYPE_DISK) {
2279 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2280 }
2281 }
2282
static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events. Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report. media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    /* Report "no medium" first; MEDIUM_CHANGED follows once the guest
     * acknowledges it (see scsi_disk_unit_attention_reported). */
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}
2303
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    /* Record the host-side eject request so the guest can see it via
     * GET EVENT STATUS NOTIFICATION; a forced eject also overrides any
     * PREVENT ALLOW MEDIUM REMOVAL lock. */
    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}
2313
2314 static bool scsi_cd_is_tray_open(void *opaque)
2315 {
2316 return ((SCSIDiskState *)opaque)->tray_open;
2317 }
2318
2319 static bool scsi_cd_is_medium_locked(void *opaque)
2320 {
2321 return ((SCSIDiskState *)opaque)->tray_locked;
2322 }
2323
/* Block-layer callbacks for devices with a removable medium (scsi-cd). */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};
2332
/* Block-layer callbacks for fixed (non-removable) devices. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
2336
2337 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2338 {
2339 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2340 if (s->media_changed) {
2341 s->media_changed = false;
2342 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2343 }
2344 }
2345
/*
 * Common realize step for scsi-hd, scsi-cd and scsi-block: validates the
 * backing drive and its configuration, fills in default identification
 * strings, and wires up the block-layer device callbacks.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Only removable devices may start without an inserted medium. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    /* A backend bound to an iothread requires HBA support. */
    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    /* CD-ROMs are always read-only regardless of backend permissions. */
    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    /* Default the INQUIRY identification strings if not set by the user. */
    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            /* Device identification is capped at 20 characters. */
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    /* /dev/sg* nodes must use scsi-generic, not the disk emulation. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}
2431
/* Undo the boot-device registration done in scsi_realize(). */
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
2436
/* Realize callback for the scsi-hd (fixed disk) device type. */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        /* Take the backend's AioContext while touching its configuration. */
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    if (ctx) {
        aio_context_release(ctx);
    }
}
2462
/* Realize callback for the scsi-cd (removable CD-ROM) device type. */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    /* CD-ROM logical blocks are 2048 bytes. */
    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}
2488
2489
/* Request ops for commands that are emulated entirely in QEMU. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};
2498
/* Request ops for READ/WRITE-style commands that go through DMA helpers. */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2509
/*
 * Opcode -> reqops dispatch table.  Unlisted opcodes (NULL entries) fall
 * back to scsi_disk_emulate_reqops in scsi_new_request(), which rejects
 * truly unknown commands with INVALID OPCODE sense.
 */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]                 = &scsi_disk_emulate_reqops,
    [INQUIRY]                         = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                      = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                   = &scsi_disk_emulate_reqops,
    [START_STOP]                      = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]            = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]                = &scsi_disk_emulate_reqops,
    [READ_TOC]                        = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]              = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]           = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]               = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION]   = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]                = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]            = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                   = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]               = &scsi_disk_emulate_reqops,
    [SEEK_10]                         = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                     = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                  = &scsi_disk_emulate_reqops,
    [UNMAP]                           = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                   = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                   = &scsi_disk_emulate_reqops,
    [VERIFY_10]                       = &scsi_disk_emulate_reqops,
    [VERIFY_12]                       = &scsi_disk_emulate_reqops,
    [VERIFY_16]                       = &scsi_disk_emulate_reqops,

    /* Data-transfer commands use the DMA-helper path. */
    [READ_6]                          = &scsi_disk_dma_reqops,
    [READ_10]                         = &scsi_disk_dma_reqops,
    [READ_12]                         = &scsi_disk_dma_reqops,
    [READ_16]                         = &scsi_disk_dma_reqops,
    [WRITE_6]                         = &scsi_disk_dma_reqops,
    [WRITE_10]                        = &scsi_disk_dma_reqops,
    [WRITE_12]                        = &scsi_disk_dma_reqops,
    [WRITE_16]                        = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]                 = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]                 = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]                 = &scsi_disk_dma_reqops,
};
2549
/* Format the CDB as " 0xNN 0xNN ..." and emit it through tracing. */
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int len = scsi_cdb_length(buf);
    char *dump, *cursor;
    int i;

    assert(len > 0 && len <= 16);
    /* Each byte renders as " 0xNN" (5 chars), plus the terminating NUL. */
    dump = g_malloc(len * 5 + 1);

    cursor = dump;
    for (i = 0; i < len; i++) {
        cursor += sprintf(cursor, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, dump);

    g_free(dump);
}
2566
2567 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2568 uint8_t *buf, void *hba_private)
2569 {
2570 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2571 SCSIRequest *req;
2572 const SCSIReqOps *ops;
2573 uint8_t command;
2574
2575 command = buf[0];
2576 ops = scsi_disk_reqops_dispatch[command];
2577 if (!ops) {
2578 ops = &scsi_disk_emulate_reqops;
2579 }
2580 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2581
2582 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2583 scsi_disk_new_request_dump(lun, tag, buf);
2584 }
2585
2586 return req;
2587 }
2588
2589 #ifdef __linux__
2590 static int get_device_type(SCSIDiskState *s)
2591 {
2592 uint8_t cmd[16];
2593 uint8_t buf[36];
2594 int ret;
2595
2596 memset(cmd, 0, sizeof(cmd));
2597 memset(buf, 0, sizeof(buf));
2598 cmd[0] = INQUIRY;
2599 cmd[4] = sizeof(buf);
2600
2601 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2602 buf, sizeof(buf), s->qdev.io_timeout);
2603 if (ret < 0) {
2604 return -1;
2605 }
2606 s->qdev.type = buf[0];
2607 if (buf[1] & 0x80) {
2608 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2609 }
2610 return 0;
2611 }
2612
/*
 * Realize callback for scsi-block: verifies the backend is a host SCSI
 * device usable through SG_IO, then probes it with INQUIRY before
 * running the common scsi_realize() step.
 */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}
2676
/* Per-request state for scsi-block DMA commands executed via SG_IO. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.  Rebuilt per segment by scsi_block_do_sgio(). */
    uint8_t cdb[16];
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;
2689
/*
 * Completion callback for SG_IO requests issued by scsi_block_do_sgio().
 * Translates the sg_io_hdr host/driver/device status into either a failed
 * completion, a retry per the rerror/werror policy, or success, and then
 * chains to the original DMA-helper callback.
 */
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        /* Transport-level errors cannot be expressed as SCSI status. */
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        /* A driver-detected timeout is surfaced to the guest as BUSY. */
        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            /* Non-GOOD status: apply the configured error policy. */
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}
2726
/*
 * Submit one segment of a DMA-helper transfer to the host device via
 * SG_IO.  A fresh CDB is synthesized for the segment's LBA and length,
 * preserving the opcode, byte 1 and group number of the guest's CDB.
 */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}
2813
/* scsi-block forwards the guest's CDB (including any FUA bit) straight
 * to the host device, so FUA never needs to be emulated with flushes. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
2818
2819 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2820 QEMUIOVector *iov,
2821 BlockCompletionFunc *cb, void *cb_opaque,
2822 void *opaque)
2823 {
2824 SCSIBlockReq *r = opaque;
2825 return scsi_block_do_sgio(r, offset, iov,
2826 SG_DXFER_FROM_DEV, cb, cb_opaque);
2827 }
2828
2829 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2830 QEMUIOVector *iov,
2831 BlockCompletionFunc *cb, void *cb_opaque,
2832 void *opaque)
2833 {
2834 SCSIBlockReq *r = opaque;
2835 return scsi_block_do_sgio(r, offset, iov,
2836 SG_DXFER_TO_DEV, cb, cb_opaque);
2837 }
2838
2839 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2840 {
2841 switch (buf[0]) {
2842 case VERIFY_10:
2843 case VERIFY_12:
2844 case VERIFY_16:
2845 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2846 * for the number of logical blocks specified in the length
2847 * field). For other modes, do not use scatter/gather operation.
2848 */
2849 if ((buf[1] & 6) == 2) {
2850 return false;
2851 }
2852 break;
2853
2854 case READ_6:
2855 case READ_10:
2856 case READ_12:
2857 case READ_16:
2858 case WRITE_6:
2859 case WRITE_10:
2860 case WRITE_12:
2861 case WRITE_16:
2862 case WRITE_VERIFY_10:
2863 case WRITE_VERIFY_12:
2864 case WRITE_VERIFY_16:
2865 /* MMC writing cannot be done via DMA helpers, because it sometimes
2866 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2867 * We might use scsi_block_dma_reqops as long as no writing commands are
2868 * seen, but performance usually isn't paramount on optical media. So,
2869 * just make scsi-block operate the same as scsi-generic for them.
2870 */
2871 if (s->qdev.type != TYPE_ROM) {
2872 return false;
2873 }
2874 break;
2875
2876 default:
2877 break;
2878 }
2879
2880 return true;
2881 }
2882
2883
/*
 * send_command hook for scsi-block DMA requests: captures the CDB bytes
 * that scsi_block_do_sgio() must preserve when it rebuilds per-segment
 * CDBs, then delegates to the common scsi_disk_dma_command().
 */
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    /* The CDB group (opcode >> 5) determines where byte 1 and the group
     * number live in the guest's CDB. */
    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    return scsi_disk_dma_command(req, buf);
}
2925
/* Request ops for scsi-block DMA commands (SG_IO-backed reads/writes). */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2936
2937 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2938 uint32_t lun, uint8_t *buf,
2939 void *hba_private)
2940 {
2941 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2942
2943 if (scsi_block_is_passthrough(s, buf)) {
2944 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2945 hba_private);
2946 } else {
2947 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2948 hba_private);
2949 }
2950 }
2951
2952 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2953 uint8_t *buf, void *hba_private)
2954 {
2955 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2956
2957 if (scsi_block_is_passthrough(s, buf)) {
2958 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2959 } else {
2960 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2961 }
2962 }
2963
/* Copy the sense length reported by SG_IO into the request, clamped to
 * the size of the request's sense buffer. */
static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}
2970 #endif
2971
2972 static
2973 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2974 BlockCompletionFunc *cb, void *cb_opaque,
2975 void *opaque)
2976 {
2977 SCSIDiskReq *r = opaque;
2978 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2979 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2980 }
2981
2982 static
2983 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2984 BlockCompletionFunc *cb, void *cb_opaque,
2985 void *opaque)
2986 {
2987 SCSIDiskReq *r = opaque;
2988 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2989 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2990 }
2991
2992 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2993 {
2994 DeviceClass *dc = DEVICE_CLASS(klass);
2995 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2996
2997 dc->fw_name = "disk";
2998 dc->reset = scsi_disk_reset;
2999 sdc->dma_readv = scsi_dma_readv;
3000 sdc->dma_writev = scsi_dma_writev;
3001 sdc->need_fua_emulation = scsi_is_cmd_fua;
3002 }
3003
/* Abstract base type shared by scsi-hd, scsi-cd and scsi-block. */
static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};
3012
/*
 * Properties shared by the emulated disk flavours: the backing drive,
 * block-layer tuning and error-policy options, and the strings reported
 * in INQUIRY/VPD data.
 */
#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),          \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),           \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3022
3023
/* qdev properties specific to the scsi-hd device type. */
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    /* Default SCSI version 5 corresponds to SPC-3. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
3043
/* Migration state: the generic SCSI device state plus media/tray flags. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};
3058
3059 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
3060 {
3061 DeviceClass *dc = DEVICE_CLASS(klass);
3062 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3063
3064 sc->realize = scsi_hd_realize;
3065 sc->unrealize = scsi_unrealize;
3066 sc->alloc_req = scsi_new_request;
3067 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3068 dc->desc = "virtual SCSI disk";
3069 device_class_set_props(dc, scsi_hd_properties);
3070 dc->vmsd = &vmstate_scsi_disk_state;
3071 }
3072
/* Concrete scsi-hd type; instance/class sizes come from the base type. */
static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};
3078
/* qdev properties specific to the scsi-cd device type (no geometry,
 * no unmap tuning: CD-ROMs are read-only). */
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};
3090
3091 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3092 {
3093 DeviceClass *dc = DEVICE_CLASS(klass);
3094 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3095
3096 sc->realize = scsi_cd_realize;
3097 sc->alloc_req = scsi_new_request;
3098 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3099 dc->desc = "virtual SCSI CD-ROM";
3100 device_class_set_props(dc, scsi_cd_properties);
3101 dc->vmsd = &vmstate_scsi_disk_state;
3102 }
3103
/* Concrete scsi-cd type; instance/class sizes come from the base type. */
static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};
3109
3110 #ifdef __linux__
/* qdev properties for scsi-block.  Note it deliberately omits the
 * INQUIRY string properties: identification comes from the host device. */
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* -1: do not override the version reported by the host device. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
3126
3127 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3128 {
3129 DeviceClass *dc = DEVICE_CLASS(klass);
3130 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3131 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3132
3133 sc->realize = scsi_block_realize;
3134 sc->alloc_req = scsi_block_new_request;
3135 sc->parse_cdb = scsi_block_parse_cdb;
3136 sdc->dma_readv = scsi_block_dma_readv;
3137 sdc->dma_writev = scsi_block_dma_writev;
3138 sdc->update_sense = scsi_block_update_sense;
3139 sdc->need_fua_emulation = scsi_block_no_fua;
3140 dc->desc = "SCSI block device passthrough";
3141 device_class_set_props(dc, scsi_block_properties);
3142 dc->vmsd = &vmstate_scsi_disk_state;
3143 }
3144
/* Concrete scsi-block type (Linux-only, requires SG_IO support). */
static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
3150 #endif
3151
/* Register all scsi-disk QOM types; scsi-block exists only on Linux
 * because it depends on the SG_IO ioctl. */
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
}
3161
3162 type_init(scsi_disk_register_types)