1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
9  *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "hw/scsi/scsi.h"
29 #include "migration/qemu-file-types.h"
30 #include "migration/vmstate.h"
31 #include "hw/scsi/emulation.h"
32 #include "scsi/constants.h"
33 #include "sysemu/block-backend.h"
34 #include "sysemu/blockdev.h"
35 #include "hw/block/block.h"
36 #include "hw/qdev-properties.h"
37 #include "sysemu/dma.h"
38 #include "sysemu/sysemu.h"
39 #include "qemu/cutils.h"
40 #include "trace.h"
41
42 #ifdef __linux__
43 #include <scsi/sg.h>
44 #endif
45
46 #define SCSI_WRITE_SAME_MAX (512 * KiB)
47 #define SCSI_DMA_BUF_SIZE (128 * KiB)
48 #define SCSI_MAX_INQUIRY_LEN 256
49 #define SCSI_MAX_MODE_LEN 256
50
51 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
52 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
53 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
54
55 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
56
57 #define SCSI_DISK_BASE(obj) \
58 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
59 #define SCSI_DISK_BASE_CLASS(klass) \
60 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
61 #define SCSI_DISK_BASE_GET_CLASS(obj) \
62 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
63
64 typedef struct SCSIDiskClass {
65 SCSIDeviceClass parent_class;
66 DMAIOFunc *dma_readv;
67 DMAIOFunc *dma_writev;
68 bool (*need_fua_emulation)(SCSICommand *cmd);
69 void (*update_sense)(SCSIRequest *r);
70 } SCSIDiskClass;
71
72 typedef struct SCSIDiskReq {
73 SCSIRequest req;
74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
75 uint64_t sector;
76 uint32_t sector_count;
77 uint32_t buflen;
78 bool started;
79 bool need_fua_emulation;
80 struct iovec iov;
81 QEMUIOVector qiov;
82 BlockAcctCookie acct;
83 unsigned char *status;
84 } SCSIDiskReq;
85
86 #define SCSI_DISK_F_REMOVABLE 0
87 #define SCSI_DISK_F_DPOFUA 1
88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
89
90 typedef struct SCSIDiskState
91 {
92 SCSIDevice qdev;
93 uint32_t features;
94 bool media_changed;
95 bool media_event;
96 bool eject_request;
97 uint16_t port_index;
98 uint64_t max_unmap_size;
99 uint64_t max_io_size;
100 QEMUBH *bh;
101 char *version;
102 char *serial;
103 char *vendor;
104 char *product;
105 char *device_id;
106 bool tray_open;
107 bool tray_locked;
108 /*
109 * 0x0000 - rotation rate not reported
110 * 0x0001 - non-rotating medium (SSD)
111 * 0x0002-0x0400 - reserved
112      * 0x0401-0xfffe - rotations per minute
113 * 0xffff - reserved
114 */
115 uint16_t rotation_rate;
116 } SCSIDiskState;
117
118 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
119
120 static void scsi_free_request(SCSIRequest *req)
121 {
122 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
123
124 qemu_vfree(r->iov.iov_base);
125 }
126
127 /* Helper function for command completion with sense. */
128 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
129 {
130 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
131 sense.ascq);
132 scsi_req_build_sense(&r->req, sense);
133 scsi_req_complete(&r->req, CHECK_CONDITION);
134 }
135
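/* Set up the bounce buffer for a request: allocate it on first use and size
 * the single-element iovec to the remaining transfer, capped at the buffer. */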
136 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
137 {
138 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
139
140 if (!r->iov.iov_base) {
141 r->buflen = size;
142 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
143 }
144 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
145 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
146 }
147
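/* Save the per-request state (position, remaining sectors, buffered data)
 * so that an in-flight request can be migrated. */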
148 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
149 {
150 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
151
152 qemu_put_be64s(f, &r->sector);
153 qemu_put_be32s(f, &r->sector_count);
154 qemu_put_be32s(f, &r->buflen);
155 if (r->buflen) {
156 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
157 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
158 } else if (!req->retry) {
159 uint32_t len = r->iov.iov_len;
160 qemu_put_be32s(f, &len);
161 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
162 }
163 }
164 }
165
166 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
167 {
168 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
169
170 qemu_get_be64s(f, &r->sector);
171 qemu_get_be32s(f, &r->sector_count);
172 qemu_get_be32s(f, &r->buflen);
173 if (r->buflen) {
174 scsi_init_iovec(r, r->buflen);
175 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
177 } else if (!r->req.retry) {
178 uint32_t len;
179 qemu_get_be32s(f, &len);
180 r->iov.iov_len = len;
181 assert(r->iov.iov_len <= r->buflen);
182 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
183 }
184 }
185
186 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
187 }
188
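/* Common completion check: handle cancellation and pass I/O or passthrough
 * errors to scsi_handle_rw_error. Returns true if the caller should stop
 * processing the request. */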
189 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
190 {
191 if (r->req.io_canceled) {
192 scsi_req_cancel_complete(&r->req);
193 return true;
194 }
195
196 if (ret < 0 || (r->status && *r->status)) {
197 return scsi_handle_rw_error(r, -ret, acct_failed);
198 }
199
200 return false;
201 }
202
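/* Generic AIO completion callback: account the operation and finish the
 * request with GOOD status unless it failed or was cancelled. */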
203 static void scsi_aio_complete(void *opaque, int ret)
204 {
205 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
207
208 assert(r->req.aiocb != NULL);
209 r->req.aiocb = NULL;
210 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
211 if (scsi_disk_req_check_error(r, ret, true)) {
212 goto done;
213 }
214
215 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
216 scsi_req_complete(&r->req, GOOD);
217
218 done:
219 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
220 scsi_req_unref(&r->req);
221 }
222
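/* Return true if the CDB requests Force Unit Access semantics, either via an
 * explicit FUA bit or implicitly for VERIFY and WRITE AND VERIFY commands. */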
223 static bool scsi_is_cmd_fua(SCSICommand *cmd)
224 {
225 switch (cmd->buf[0]) {
226 case READ_10:
227 case READ_12:
228 case READ_16:
229 case WRITE_10:
230 case WRITE_12:
231 case WRITE_16:
232 return (cmd->buf[1] & 8) != 0;
233
234 case VERIFY_10:
235 case VERIFY_12:
236 case VERIFY_16:
237 case WRITE_VERIFY_10:
238 case WRITE_VERIFY_12:
239 case WRITE_VERIFY_16:
240 return true;
241
242 case READ_6:
243 case WRITE_6:
244 default:
245 return false;
246 }
247 }
248
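/* Finish a write request: if FUA has to be emulated, flush the backend before
 * completing, otherwise complete with GOOD status right away. */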
249 static void scsi_write_do_fua(SCSIDiskReq *r)
250 {
251 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
252
253 assert(r->req.aiocb == NULL);
254 assert(!r->req.io_canceled);
255
256 if (r->need_fua_emulation) {
257 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
258 BLOCK_ACCT_FLUSH);
259 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
260 return;
261 }
262
263 scsi_req_complete(&r->req, GOOD);
264 scsi_req_unref(&r->req);
265 }
266
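/* Complete a scatter/gather transfer once all data has been moved; writes
 * still go through the FUA flush before the request is completed. */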
267 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
268 {
269 assert(r->req.aiocb == NULL);
270 if (scsi_disk_req_check_error(r, ret, false)) {
271 goto done;
272 }
273
274 r->sector += r->sector_count;
275 r->sector_count = 0;
276 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
277 scsi_write_do_fua(r);
278 return;
279 } else {
280 scsi_req_complete(&r->req, GOOD);
281 }
282
283 done:
284 scsi_req_unref(&r->req);
285 }
286
287 static void scsi_dma_complete(void *opaque, int ret)
288 {
289 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
290 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
291
292 assert(r->req.aiocb != NULL);
293 r->req.aiocb = NULL;
294
295 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
296 if (ret < 0) {
297 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
298 } else {
299 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
300 }
301 scsi_dma_complete_noio(r, ret);
302 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
303 }
304
305 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
306 {
307 uint32_t n;
308
309 assert(r->req.aiocb == NULL);
310 if (scsi_disk_req_check_error(r, ret, false)) {
311 goto done;
312 }
313
314 n = r->qiov.size / 512;
315 r->sector += n;
316 r->sector_count -= n;
317 scsi_req_data(&r->req, r->qiov.size);
318
319 done:
320 scsi_req_unref(&r->req);
321 }
322
323 static void scsi_read_complete(void *opaque, int ret)
324 {
325 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
326 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
327
328 assert(r->req.aiocb != NULL);
329 r->req.aiocb = NULL;
330
331 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
332 if (ret < 0) {
333 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
334 } else {
335 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
336 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
337 }
338 scsi_read_complete_noio(r, ret);
339 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
340 }
341
342 /* Actually issue a read to the block device. */
343 static void scsi_do_read(SCSIDiskReq *r, int ret)
344 {
345 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
346 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
347
348 assert (r->req.aiocb == NULL);
349 if (scsi_disk_req_check_error(r, ret, false)) {
350 goto done;
351 }
352
353 /* The request is used as the AIO opaque value, so add a ref. */
354 scsi_req_ref(&r->req);
355
356 if (r->req.sg) {
357 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
358 r->req.resid -= r->req.sg->size;
359 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
360 r->req.sg, r->sector << BDRV_SECTOR_BITS,
361 BDRV_SECTOR_SIZE,
362 sdc->dma_readv, r, scsi_dma_complete, r,
363 DMA_DIRECTION_FROM_DEVICE);
364 } else {
365 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
366 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
367 r->qiov.size, BLOCK_ACCT_READ);
368 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
369 scsi_read_complete, r, r);
370 }
371
372 done:
373 scsi_req_unref(&r->req);
374 }
375
376 static void scsi_do_read_cb(void *opaque, int ret)
377 {
378 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
379 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
380
381 assert (r->req.aiocb != NULL);
382 r->req.aiocb = NULL;
383
384 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
385 if (ret < 0) {
386 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
387 } else {
388 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
389 }
390 scsi_do_read(opaque, ret);
391 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
392 }
393
394 /* Read more data from scsi device into buffer. */
395 static void scsi_read_data(SCSIRequest *req)
396 {
397 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
398 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
399 bool first;
400
401 trace_scsi_disk_read_data_count(r->sector_count);
402 if (r->sector_count == 0) {
403 /* This also clears the sense buffer for REQUEST SENSE. */
404 scsi_req_complete(&r->req, GOOD);
405 return;
406 }
407
408 /* No data transfer may already be in progress */
409 assert(r->req.aiocb == NULL);
410
411 /* The request is used as the AIO opaque value, so add a ref. */
412 scsi_req_ref(&r->req);
413 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
414 trace_scsi_disk_read_data_invalid();
415 scsi_read_complete_noio(r, -EINVAL);
416 return;
417 }
418
419 if (!blk_is_available(req->dev->conf.blk)) {
420 scsi_read_complete_noio(r, -ENOMEDIUM);
421 return;
422 }
423
424 first = !r->started;
425 r->started = true;
426 if (first && r->need_fua_emulation) {
427 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
428 BLOCK_ACCT_FLUSH);
429 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
430 } else {
431 scsi_do_read(r, 0);
432 }
433 }
434
435 /*
436 * scsi_handle_rw_error has two return values. False means that the error
437 * must be ignored, true means that the error has been processed and the
438 * caller should not do anything else for this request. Note that
439 * scsi_handle_rw_error always manages its reference counts, independent
440 * of the return value.
441 */
442 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
443 {
444 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
445 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
446 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
447 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
448 is_read, error);
449
450 if (action == BLOCK_ERROR_ACTION_REPORT) {
451 if (acct_failed) {
452 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
453 }
454 switch (error) {
455 case 0:
456 /* A passthrough command has run and has produced sense data; check
457 * whether the error has to be handled by the guest or should rather
458 * pause the host.
459 */
460 assert(r->status && *r->status);
461 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
462 /* These errors are handled by guest. */
463 sdc->update_sense(&r->req);
464 scsi_req_complete(&r->req, *r->status);
465 return true;
466 }
467 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
468 break;
469 case ENOMEDIUM:
470 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
471 break;
472 case ENOMEM:
473 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
474 break;
475 case EINVAL:
476 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
477 break;
478 case ENOSPC:
479 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
480 break;
481 default:
482 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
483 break;
484 }
485 }
486
487 blk_error_action(s->qdev.conf.blk, action, is_read, error);
488 if (action == BLOCK_ERROR_ACTION_IGNORE) {
489 scsi_req_complete(&r->req, 0);
490 return true;
491 }
492
493 if (action == BLOCK_ERROR_ACTION_STOP) {
494 scsi_req_retry(&r->req);
495 }
496 return true;
497 }
498
499 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
500 {
501 uint32_t n;
502
503 assert (r->req.aiocb == NULL);
504 if (scsi_disk_req_check_error(r, ret, false)) {
505 goto done;
506 }
507
508 n = r->qiov.size / 512;
509 r->sector += n;
510 r->sector_count -= n;
511 if (r->sector_count == 0) {
512 scsi_write_do_fua(r);
513 return;
514 } else {
515 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
516 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
517 scsi_req_data(&r->req, r->qiov.size);
518 }
519
520 done:
521 scsi_req_unref(&r->req);
522 }
523
524 static void scsi_write_complete(void * opaque, int ret)
525 {
526 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
527 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
528
529 assert (r->req.aiocb != NULL);
530 r->req.aiocb = NULL;
531
532 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
533 if (ret < 0) {
534 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
535 } else {
536 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
537 }
538 scsi_write_complete_noio(r, ret);
539 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
540 }
541
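/* Transfer more data to the block device, either directly from the guest's
 * scatter/gather list or via the bounce buffer; VERIFY commands skip the
 * actual write. */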
542 static void scsi_write_data(SCSIRequest *req)
543 {
544 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
545 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
546 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
547
548 /* No data transfer may already be in progress */
549 assert(r->req.aiocb == NULL);
550
551 /* The request is used as the AIO opaque value, so add a ref. */
552 scsi_req_ref(&r->req);
553 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
554 trace_scsi_disk_write_data_invalid();
555 scsi_write_complete_noio(r, -EINVAL);
556 return;
557 }
558
559 if (!r->req.sg && !r->qiov.size) {
560 /* Called for the first time. Ask the driver to send us more data. */
561 r->started = true;
562 scsi_write_complete_noio(r, 0);
563 return;
564 }
565 if (!blk_is_available(req->dev->conf.blk)) {
566 scsi_write_complete_noio(r, -ENOMEDIUM);
567 return;
568 }
569
570 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
571 r->req.cmd.buf[0] == VERIFY_16) {
572 if (r->req.sg) {
573 scsi_dma_complete_noio(r, 0);
574 } else {
575 scsi_write_complete_noio(r, 0);
576 }
577 return;
578 }
579
580 if (r->req.sg) {
581 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
582 r->req.resid -= r->req.sg->size;
583 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
584 r->req.sg, r->sector << BDRV_SECTOR_BITS,
585 BDRV_SECTOR_SIZE,
586 sdc->dma_writev, r, scsi_dma_complete, r,
587 DMA_DIRECTION_TO_DEVICE);
588 } else {
589 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
590 r->qiov.size, BLOCK_ACCT_WRITE);
591 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
592 scsi_write_complete, r, r);
593 }
594 }
595
596 /* Return a pointer to the data buffer. */
597 static uint8_t *scsi_get_buf(SCSIRequest *req)
598 {
599 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
600
601 return (uint8_t *)r->iov.iov_base;
602 }
603
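/* Build the response for INQUIRY with EVPD=1. Returns the number of bytes
 * filled into outbuf, or -1 if the requested page is not supported. */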
604 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
605 {
606 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
607 uint8_t page_code = req->cmd.buf[2];
608 int start, buflen = 0;
609
610 outbuf[buflen++] = s->qdev.type & 0x1f;
611 outbuf[buflen++] = page_code;
612 outbuf[buflen++] = 0x00;
613 outbuf[buflen++] = 0x00;
614 start = buflen;
615
616 switch (page_code) {
617 case 0x00: /* Supported page codes, mandatory */
618 {
619 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
620 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
621 if (s->serial) {
622 outbuf[buflen++] = 0x80; /* unit serial number */
623 }
624 outbuf[buflen++] = 0x83; /* device identification */
625 if (s->qdev.type == TYPE_DISK) {
626 outbuf[buflen++] = 0xb0; /* block limits */
627 outbuf[buflen++] = 0xb1; /* block device characteristics */
628 outbuf[buflen++] = 0xb2; /* thin provisioning */
629 }
630 break;
631 }
632 case 0x80: /* Device serial number, optional */
633 {
634 int l;
635
636 if (!s->serial) {
637 trace_scsi_disk_emulate_vpd_page_80_not_supported();
638 return -1;
639 }
640
641 l = strlen(s->serial);
642 if (l > 36) {
643 l = 36;
644 }
645
646 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
647 memcpy(outbuf + buflen, s->serial, l);
648 buflen += l;
649 break;
650 }
651
652 case 0x83: /* Device identification page, mandatory */
653 {
654 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
655
656 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
657
658 if (id_len) {
659 outbuf[buflen++] = 0x2; /* ASCII */
660 outbuf[buflen++] = 0; /* not officially assigned */
661 outbuf[buflen++] = 0; /* reserved */
662 outbuf[buflen++] = id_len; /* length of data following */
663 memcpy(outbuf + buflen, s->device_id, id_len);
664 buflen += id_len;
665 }
666
667 if (s->qdev.wwn) {
668 outbuf[buflen++] = 0x1; /* Binary */
669 outbuf[buflen++] = 0x3; /* NAA */
670 outbuf[buflen++] = 0; /* reserved */
671 outbuf[buflen++] = 8;
672 stq_be_p(&outbuf[buflen], s->qdev.wwn);
673 buflen += 8;
674 }
675
676 if (s->qdev.port_wwn) {
677 outbuf[buflen++] = 0x61; /* SAS / Binary */
678 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
679 outbuf[buflen++] = 0; /* reserved */
680 outbuf[buflen++] = 8;
681 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
682 buflen += 8;
683 }
684
685 if (s->port_index) {
686 outbuf[buflen++] = 0x61; /* SAS / Binary */
687
688 /* PIV/Target port/relative target port */
689 outbuf[buflen++] = 0x94;
690
691 outbuf[buflen++] = 0; /* reserved */
692 outbuf[buflen++] = 4;
693 stw_be_p(&outbuf[buflen + 2], s->port_index);
694 buflen += 4;
695 }
696 break;
697 }
698 case 0xb0: /* block limits */
699 {
700 SCSIBlockLimits bl = {};
701
702 if (s->qdev.type == TYPE_ROM) {
703 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
704 return -1;
705 }
706 bl.wsnz = 1;
707 bl.unmap_sectors =
708 s->qdev.conf.discard_granularity / s->qdev.blocksize;
709 bl.min_io_size =
710 s->qdev.conf.min_io_size / s->qdev.blocksize;
711 bl.opt_io_size =
712 s->qdev.conf.opt_io_size / s->qdev.blocksize;
713 bl.max_unmap_sectors =
714 s->max_unmap_size / s->qdev.blocksize;
715 bl.max_io_sectors =
716 s->max_io_size / s->qdev.blocksize;
717 /* 255 descriptors fit in 4 KiB with an 8-byte header */
718 bl.max_unmap_descr = 255;
719
720 if (s->qdev.type == TYPE_DISK) {
721 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
722 int max_io_sectors_blk =
723 max_transfer_blk / s->qdev.blocksize;
724
725 bl.max_io_sectors =
726 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
727 }
728 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
729 break;
730 }
731 case 0xb1: /* block device characteristics */
732 {
733 buflen = 0x40;
734 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
735 outbuf[5] = s->rotation_rate & 0xff;
736 outbuf[6] = 0; /* PRODUCT TYPE */
737 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
738 outbuf[8] = 0; /* VBULS */
739 break;
740 }
741 case 0xb2: /* thin provisioning */
742 {
743 buflen = 8;
744 outbuf[4] = 0;
745 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
746 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
747 outbuf[7] = 0;
748 break;
749 }
750 default:
751 return -1;
752 }
753 /* done with EVPD */
754 assert(buflen - start <= 255);
755 outbuf[start - 1] = buflen - start;
756 return buflen;
757 }
758
759 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
760 {
761 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
762 int buflen = 0;
763
764 if (req->cmd.buf[1] & 0x1) {
765 /* Vital product data */
766 return scsi_disk_emulate_vpd_page(req, outbuf);
767 }
768
769 /* Standard INQUIRY data */
770 if (req->cmd.buf[2] != 0) {
771 return -1;
772 }
773
774 /* PAGE CODE == 0 */
775 buflen = req->cmd.xfer;
776 if (buflen > SCSI_MAX_INQUIRY_LEN) {
777 buflen = SCSI_MAX_INQUIRY_LEN;
778 }
779
780 outbuf[0] = s->qdev.type & 0x1f;
781 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
782
783 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
784 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
785
786 memset(&outbuf[32], 0, 4);
787 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
788 /*
789 * We claim conformance to SPC-3, which is required for guests
790 * to ask for modern features like READ CAPACITY(16) or the
791 * block characteristics VPD page by default. Not all of SPC-3
792 * is actually implemented, but we're good enough.
793 */
794 outbuf[2] = s->qdev.default_scsi_version;
795 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
796
797 if (buflen > 36) {
798 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
799 } else {
800 /* If the allocation length of CDB is too small,
801 the additional length is not adjusted */
802 outbuf[4] = 36 - 5;
803 }
804
805 /* Sync data transfer and TCQ. */
806 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
807 return buflen;
808 }
809
810 static inline bool media_is_dvd(SCSIDiskState *s)
811 {
812 uint64_t nb_sectors;
813 if (s->qdev.type != TYPE_ROM) {
814 return false;
815 }
816 if (!blk_is_available(s->qdev.conf.blk)) {
817 return false;
818 }
819 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
820 return nb_sectors > CD_MAX_SECTORS;
821 }
822
823 static inline bool media_is_cd(SCSIDiskState *s)
824 {
825 uint64_t nb_sectors;
826 if (s->qdev.type != TYPE_ROM) {
827 return false;
828 }
829 if (!blk_is_available(s->qdev.conf.blk)) {
830 return false;
831 }
832 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
833 return nb_sectors <= CD_MAX_SECTORS;
834 }
835
836 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
837 uint8_t *outbuf)
838 {
839 uint8_t type = r->req.cmd.buf[1] & 7;
840
841 if (s->qdev.type != TYPE_ROM) {
842 return -1;
843 }
844
845 /* Types 1/2 are only defined for Blu-Ray. */
846 if (type != 0) {
847 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
848 return -1;
849 }
850
851 memset(outbuf, 0, 34);
852 outbuf[1] = 32;
853 outbuf[2] = 0xe; /* last session complete, disc finalized */
854 outbuf[3] = 1; /* first track on disc */
855 outbuf[4] = 1; /* # of sessions */
856 outbuf[5] = 1; /* first track of last session */
857 outbuf[6] = 1; /* last track of last session */
858 outbuf[7] = 0x20; /* unrestricted use */
859 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
860     /* 9-10-11: most significant bytes corresponding to bytes 4-5-6 */
861 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
862 /* 24-31: disc bar code */
863 /* 32: disc application code */
864 /* 33: number of OPC tables */
865
866 return 34;
867 }
868
869 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
870 uint8_t *outbuf)
871 {
872 static const int rds_caps_size[5] = {
873 [0] = 2048 + 4,
874 [1] = 4 + 4,
875 [3] = 188 + 4,
876 [4] = 2048 + 4,
877 };
878
879 uint8_t media = r->req.cmd.buf[1];
880 uint8_t layer = r->req.cmd.buf[6];
881 uint8_t format = r->req.cmd.buf[7];
882 int size = -1;
883
884 if (s->qdev.type != TYPE_ROM) {
885 return -1;
886 }
887 if (media != 0) {
888 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
889 return -1;
890 }
891
892 if (format != 0xff) {
893 if (!blk_is_available(s->qdev.conf.blk)) {
894 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
895 return -1;
896 }
897 if (media_is_cd(s)) {
898 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
899 return -1;
900 }
901 if (format >= ARRAY_SIZE(rds_caps_size)) {
902 return -1;
903 }
904 size = rds_caps_size[format];
905 memset(outbuf, 0, size);
906 }
907
908 switch (format) {
909 case 0x00: {
910 /* Physical format information */
911 uint64_t nb_sectors;
912 if (layer != 0) {
913 goto fail;
914 }
915 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
916
917 outbuf[4] = 1; /* DVD-ROM, part version 1 */
918 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
919 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
920 outbuf[7] = 0; /* default densities */
921
922 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
923 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
924 break;
925 }
926
927 case 0x01: /* DVD copyright information, all zeros */
928 break;
929
930 case 0x03: /* BCA information - invalid field for no BCA info */
931 return -1;
932
933 case 0x04: /* DVD disc manufacturing information, all zeros */
934 break;
935
936 case 0xff: { /* List capabilities */
937 int i;
938 size = 4;
939 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
940 if (!rds_caps_size[i]) {
941 continue;
942 }
943 outbuf[size] = i;
944 outbuf[size + 1] = 0x40; /* Not writable, readable */
945 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
946 size += 4;
947 }
948 break;
949 }
950
951 default:
952 return -1;
953 }
954
955 /* Size of buffer, not including 2 byte size field */
956 stw_be_p(outbuf, size - 2);
957 return size;
958
959 fail:
960 return -1;
961 }
962
963 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
964 {
965 uint8_t event_code, media_status;
966
967 media_status = 0;
968 if (s->tray_open) {
969 media_status = MS_TRAY_OPEN;
970 } else if (blk_is_inserted(s->qdev.conf.blk)) {
971 media_status = MS_MEDIA_PRESENT;
972 }
973
974 /* Event notification descriptor */
975 event_code = MEC_NO_CHANGE;
976 if (media_status != MS_TRAY_OPEN) {
977 if (s->media_event) {
978 event_code = MEC_NEW_MEDIA;
979 s->media_event = false;
980 } else if (s->eject_request) {
981 event_code = MEC_EJECT_REQUESTED;
982 s->eject_request = false;
983 }
984 }
985
986 outbuf[0] = event_code;
987 outbuf[1] = media_status;
988
989 /* These fields are reserved, just clear them. */
990 outbuf[2] = 0;
991 outbuf[3] = 0;
992 return 4;
993 }
994
995 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
996 uint8_t *outbuf)
997 {
998 int size;
999 uint8_t *buf = r->req.cmd.buf;
1000 uint8_t notification_class_request = buf[4];
1001 if (s->qdev.type != TYPE_ROM) {
1002 return -1;
1003 }
1004 if ((buf[1] & 1) == 0) {
1005 /* asynchronous */
1006 return -1;
1007 }
1008
1009 size = 4;
1010 outbuf[0] = outbuf[1] = 0;
1011 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1012 if (notification_class_request & (1 << GESN_MEDIA)) {
1013 outbuf[2] = GESN_MEDIA;
1014 size += scsi_event_status_media(s, &outbuf[size]);
1015 } else {
1016 outbuf[2] = 0x80;
1017 }
1018 stw_be_p(outbuf, size - 4);
1019 return size;
1020 }
1021
1022 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1023 {
1024 int current;
1025
1026 if (s->qdev.type != TYPE_ROM) {
1027 return -1;
1028 }
1029
1030 if (media_is_dvd(s)) {
1031 current = MMC_PROFILE_DVD_ROM;
1032 } else if (media_is_cd(s)) {
1033 current = MMC_PROFILE_CD_ROM;
1034 } else {
1035 current = MMC_PROFILE_NONE;
1036 }
1037
1038 memset(outbuf, 0, 40);
1039 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1040 stw_be_p(&outbuf[6], current);
1041 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1042 outbuf[10] = 0x03; /* persistent, current */
1043 outbuf[11] = 8; /* two profiles */
1044 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1045 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1046 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1047 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1048 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1049 stw_be_p(&outbuf[20], 1);
1050 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1051 outbuf[23] = 8;
1052 stl_be_p(&outbuf[24], 1); /* SCSI */
1053 outbuf[28] = 1; /* DBE = 1, mandatory */
1054 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1055 stw_be_p(&outbuf[32], 3);
1056 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1057 outbuf[35] = 4;
1058 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1059 /* TODO: Random readable, CD read, DVD read, drive serial number,
1060 power management */
1061 return 40;
1062 }
1063
1064 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1065 {
1066 if (s->qdev.type != TYPE_ROM) {
1067 return -1;
1068 }
1069 memset(outbuf, 0, 8);
1070 outbuf[5] = 1; /* CD-ROM */
1071 return 8;
1072 }
1073
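/* Emit a single mode page at *p_outbuf and advance the pointer. Returns the
 * number of bytes added, or -1 if the page does not apply to this device. */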
1074 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1075 int page_control)
1076 {
1077 static const int mode_sense_valid[0x3f] = {
1078 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1079 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1080 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1081 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1082 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1083 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1084 };
1085
1086 uint8_t *p = *p_outbuf + 2;
1087 int length;
1088
1089 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1090 return -1;
1091 }
1092
1093 /*
1094 * If Changeable Values are requested, a mask denoting those mode parameters
1095 * that are changeable shall be returned. As we currently don't support
1096      * parameter changes via MODE_SELECT, all bits are returned set to zero.
1097      * The buffer was already memset to zero by the caller of this function.
1098 *
1099 * The offsets here are off by two compared to the descriptions in the
1100 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1101 * but it is done so that offsets are consistent within our implementation
1102 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1103 * 2-byte and 4-byte headers.
1104 */
1105 switch (page) {
1106 case MODE_PAGE_HD_GEOMETRY:
1107 length = 0x16;
1108 if (page_control == 1) { /* Changeable Values */
1109 break;
1110 }
1111 /* if a geometry hint is available, use it */
1112 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1113 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1114 p[2] = s->qdev.conf.cyls & 0xff;
1115 p[3] = s->qdev.conf.heads & 0xff;
1116 /* Write precomp start cylinder, disabled */
1117 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1118 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1119 p[6] = s->qdev.conf.cyls & 0xff;
1120 /* Reduced current start cylinder, disabled */
1121 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1122 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1123 p[9] = s->qdev.conf.cyls & 0xff;
1124 /* Device step rate [ns], 200ns */
1125 p[10] = 0;
1126 p[11] = 200;
1127 /* Landing zone cylinder */
1128 p[12] = 0xff;
1129 p[13] = 0xff;
1130 p[14] = 0xff;
1131 /* Medium rotation rate [rpm], 5400 rpm */
1132 p[18] = (5400 >> 8) & 0xff;
1133 p[19] = 5400 & 0xff;
1134 break;
1135
1136 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1137 length = 0x1e;
1138 if (page_control == 1) { /* Changeable Values */
1139 break;
1140 }
1141 /* Transfer rate [kbit/s], 5Mbit/s */
1142 p[0] = 5000 >> 8;
1143 p[1] = 5000 & 0xff;
1144 /* if a geometry hint is available, use it */
1145 p[2] = s->qdev.conf.heads & 0xff;
1146 p[3] = s->qdev.conf.secs & 0xff;
1147 p[4] = s->qdev.blocksize >> 8;
1148 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1149 p[7] = s->qdev.conf.cyls & 0xff;
1150 /* Write precomp start cylinder, disabled */
1151 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1152 p[9] = s->qdev.conf.cyls & 0xff;
1153 /* Reduced current start cylinder, disabled */
1154 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1155 p[11] = s->qdev.conf.cyls & 0xff;
1156 /* Device step rate [100us], 100us */
1157 p[12] = 0;
1158 p[13] = 1;
1159 /* Device step pulse width [us], 1us */
1160 p[14] = 1;
1161 /* Device head settle delay [100us], 100us */
1162 p[15] = 0;
1163 p[16] = 1;
1164 /* Motor on delay [0.1s], 0.1s */
1165 p[17] = 1;
1166 /* Motor off delay [0.1s], 0.1s */
1167 p[18] = 1;
1168 /* Medium rotation rate [rpm], 5400 rpm */
1169 p[26] = (5400 >> 8) & 0xff;
1170 p[27] = 5400 & 0xff;
1171 break;
1172
1173 case MODE_PAGE_CACHING:
1174 length = 0x12;
1175 if (page_control == 1 || /* Changeable Values */
1176 blk_enable_write_cache(s->qdev.conf.blk)) {
1177 p[0] = 4; /* WCE */
1178 }
1179 break;
1180
1181 case MODE_PAGE_R_W_ERROR:
1182 length = 10;
1183 if (page_control == 1) { /* Changeable Values */
1184 break;
1185 }
1186 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1187 if (s->qdev.type == TYPE_ROM) {
1188 p[1] = 0x20; /* Read Retry Count */
1189 }
1190 break;
1191
1192 case MODE_PAGE_AUDIO_CTL:
1193 length = 14;
1194 break;
1195
1196 case MODE_PAGE_CAPABILITIES:
1197 length = 0x14;
1198 if (page_control == 1) { /* Changeable Values */
1199 break;
1200 }
1201
1202 p[0] = 0x3b; /* CD-R & CD-RW read */
1203 p[1] = 0; /* Writing not supported */
1204 p[2] = 0x7f; /* Audio, composite, digital out,
1205 mode 2 form 1&2, multi session */
1206 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1207 RW corrected, C2 errors, ISRC,
1208 UPC, Bar code */
1209 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1210 /* Locking supported, jumper present, eject, tray */
1211 p[5] = 0; /* no volume & mute control, no
1212 changer */
1213 p[6] = (50 * 176) >> 8; /* 50x read speed */
1214 p[7] = (50 * 176) & 0xff;
1215 p[8] = 2 >> 8; /* Two volume levels */
1216 p[9] = 2 & 0xff;
1217 p[10] = 2048 >> 8; /* 2M buffer */
1218 p[11] = 2048 & 0xff;
1219 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1220 p[13] = (16 * 176) & 0xff;
1221 p[16] = (16 * 176) >> 8; /* 16x write speed */
1222 p[17] = (16 * 176) & 0xff;
1223 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1224 p[19] = (16 * 176) & 0xff;
1225 break;
1226
1227 default:
1228 return -1;
1229 }
1230
1231 assert(length < 256);
1232 (*p_outbuf)[0] = page;
1233 (*p_outbuf)[1] = length;
1234 *p_outbuf += length + 2;
1235 return length + 2;
1236 }
1237
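/* Build the MODE SENSE(6/10) response: header, optional block descriptor and
 * the requested mode page(s). Returns the response length, or -1 on error. */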
1238 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1239 {
1240 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1241 uint64_t nb_sectors;
1242 bool dbd;
1243 int page, buflen, ret, page_control;
1244 uint8_t *p;
1245 uint8_t dev_specific_param;
1246
1247 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1248 page = r->req.cmd.buf[2] & 0x3f;
1249 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1250
1251 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1252 10, page, r->req.cmd.xfer, page_control);
1253 memset(outbuf, 0, r->req.cmd.xfer);
1254 p = outbuf;
1255
1256 if (s->qdev.type == TYPE_DISK) {
1257 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1258 if (blk_is_read_only(s->qdev.conf.blk)) {
1259 dev_specific_param |= 0x80; /* Readonly. */
1260 }
1261 } else {
1262 /* MMC prescribes that CD/DVD drives have no block descriptors,
1263 * and defines no device-specific parameter. */
1264 dev_specific_param = 0x00;
1265 dbd = true;
1266 }
1267
1268 if (r->req.cmd.buf[0] == MODE_SENSE) {
1269 p[1] = 0; /* Default media type. */
1270 p[2] = dev_specific_param;
1271 p[3] = 0; /* Block descriptor length. */
1272 p += 4;
1273 } else { /* MODE_SENSE_10 */
1274 p[2] = 0; /* Default media type. */
1275 p[3] = dev_specific_param;
1276 p[6] = p[7] = 0; /* Block descriptor length. */
1277 p += 8;
1278 }
1279
1280 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1281 if (!dbd && nb_sectors) {
1282 if (r->req.cmd.buf[0] == MODE_SENSE) {
1283 outbuf[3] = 8; /* Block descriptor length */
1284 } else { /* MODE_SENSE_10 */
1285 outbuf[7] = 8; /* Block descriptor length */
1286 }
1287 nb_sectors /= (s->qdev.blocksize / 512);
1288 if (nb_sectors > 0xffffff) {
1289 nb_sectors = 0;
1290 }
1291 p[0] = 0; /* media density code */
1292 p[1] = (nb_sectors >> 16) & 0xff;
1293 p[2] = (nb_sectors >> 8) & 0xff;
1294 p[3] = nb_sectors & 0xff;
1295 p[4] = 0; /* reserved */
1296 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1297 p[6] = s->qdev.blocksize >> 8;
1298 p[7] = 0;
1299 p += 8;
1300 }
1301
1302 if (page_control == 3) {
1303 /* Saved Values */
1304 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1305 return -1;
1306 }
1307
1308 if (page == 0x3f) {
1309 for (page = 0; page <= 0x3e; page++) {
1310 mode_sense_page(s, page, &p, page_control);
1311 }
1312 } else {
1313 ret = mode_sense_page(s, page, &p, page_control);
1314 if (ret == -1) {
1315 return -1;
1316 }
1317 }
1318
1319 buflen = p - outbuf;
1320 /*
1321 * The mode data length field specifies the length in bytes of the
1322 * following data that is available to be transferred. The mode data
1323 * length does not include itself.
1324 */
1325 if (r->req.cmd.buf[0] == MODE_SENSE) {
1326 outbuf[0] = buflen - 1;
1327 } else { /* MODE_SENSE_10 */
1328 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1329 outbuf[1] = (buflen - 2) & 0xff;
1330 }
1331 return buflen;
1332 }
1333
1334 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1335 {
1336 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1337 int start_track, format, msf, toclen;
1338 uint64_t nb_sectors;
1339
1340 msf = req->cmd.buf[1] & 2;
1341 format = req->cmd.buf[2] & 0xf;
1342 start_track = req->cmd.buf[6];
1343 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1344 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1345 nb_sectors /= s->qdev.blocksize / 512;
1346 switch (format) {
1347 case 0:
1348 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1349 break;
1350 case 1:
1351 /* multi session : only a single session defined */
1352 toclen = 12;
1353 memset(outbuf, 0, 12);
1354 outbuf[1] = 0x0a;
1355 outbuf[2] = 0x01;
1356 outbuf[3] = 0x01;
1357 break;
1358 case 2:
1359 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1360 break;
1361 default:
1362 return -1;
1363 }
1364 return toclen;
1365 }
1366
1367 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1368 {
1369 SCSIRequest *req = &r->req;
1370 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1371 bool start = req->cmd.buf[4] & 1;
1372 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1373 int pwrcnd = req->cmd.buf[4] & 0xf0;
1374
1375 if (pwrcnd) {
1376 /* eject/load only happens for power condition == 0 */
1377 return 0;
1378 }
1379
1380 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1381 if (!start && !s->tray_open && s->tray_locked) {
1382 scsi_check_condition(r,
1383 blk_is_inserted(s->qdev.conf.blk)
1384 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1385 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1386 return -1;
1387 }
1388
1389 if (s->tray_open != !start) {
1390 blk_eject(s->qdev.conf.blk, !start);
1391 s->tray_open = !start;
1392 }
1393 }
1394 return 0;
1395 }
1396
1397 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1398 {
1399 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1400 int buflen = r->iov.iov_len;
1401
1402 if (buflen) {
1403 trace_scsi_disk_emulate_read_data(buflen);
1404 r->iov.iov_len = 0;
1405 r->started = true;
1406 scsi_req_data(&r->req, buflen);
1407 return;
1408 }
1409
1410 /* This also clears the sense buffer for REQUEST SENSE. */
1411 scsi_req_complete(&r->req, GOOD);
1412 }
1413
1414 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1415 uint8_t *inbuf, int inlen)
1416 {
1417 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1418 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1419 uint8_t *p;
1420 int len, expected_len, changeable_len, i;
1421
1422 /* The input buffer does not include the page header, so it is
1423 * off by 2 bytes.
1424 */
1425 expected_len = inlen + 2;
1426 if (expected_len > SCSI_MAX_MODE_LEN) {
1427 return -1;
1428 }
1429
1430 p = mode_current;
1431 memset(mode_current, 0, inlen + 2);
1432 len = mode_sense_page(s, page, &p, 0);
1433 if (len < 0 || len != expected_len) {
1434 return -1;
1435 }
1436
1437 p = mode_changeable;
1438 memset(mode_changeable, 0, inlen + 2);
1439 changeable_len = mode_sense_page(s, page, &p, 1);
1440 assert(changeable_len == len);
1441
1442 /* Check that unchangeable bits are the same as what MODE SENSE
1443 * would return.
1444 */
1445 for (i = 2; i < len; i++) {
1446 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1447 return -1;
1448 }
1449 }
1450 return 0;
1451 }
1452
1453 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1454 {
1455 switch (page) {
1456 case MODE_PAGE_CACHING:
1457 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1458 break;
1459
1460 default:
1461 break;
1462 }
1463 }
1464
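/* Walk the mode pages of a MODE SELECT parameter list. With change=false the
 * pages are only validated against the changeable mask; with change=true they
 * are applied. */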
1465 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1466 {
1467 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1468
1469 while (len > 0) {
1470 int page, subpage, page_len;
1471
1472 /* Parse both possible formats for the mode page headers. */
1473 page = p[0] & 0x3f;
1474 if (p[0] & 0x40) {
1475 if (len < 4) {
1476 goto invalid_param_len;
1477 }
1478 subpage = p[1];
1479 page_len = lduw_be_p(&p[2]);
1480 p += 4;
1481 len -= 4;
1482 } else {
1483 if (len < 2) {
1484 goto invalid_param_len;
1485 }
1486 subpage = 0;
1487 page_len = p[1];
1488 p += 2;
1489 len -= 2;
1490 }
1491
1492 if (subpage) {
1493 goto invalid_param;
1494 }
1495 if (page_len > len) {
1496 goto invalid_param_len;
1497 }
1498
1499 if (!change) {
1500 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1501 goto invalid_param;
1502 }
1503 } else {
1504 scsi_disk_apply_mode_select(s, page, p);
1505 }
1506
1507 p += page_len;
1508 len -= page_len;
1509 }
1510 return 0;
1511
1512 invalid_param:
1513 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1514 return -1;
1515
1516 invalid_param_len:
1517 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1518 return -1;
1519 }
1520
1521 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1522 {
1523 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1524 uint8_t *p = inbuf;
1525 int cmd = r->req.cmd.buf[0];
1526 int len = r->req.cmd.xfer;
1527 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1528 int bd_len;
1529 int pass;
1530
1531 /* We only support PF=1, SP=0. */
1532 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1533 goto invalid_field;
1534 }
1535
1536 if (len < hdr_len) {
1537 goto invalid_param_len;
1538 }
1539
1540 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1541 len -= hdr_len;
1542 p += hdr_len;
1543 if (len < bd_len) {
1544 goto invalid_param_len;
1545 }
1546 if (bd_len != 0 && bd_len != 8) {
1547 goto invalid_param;
1548 }
1549
1550 len -= bd_len;
1551 p += bd_len;
1552
1553 /* Ensure no change is made if there is an error! */
1554 for (pass = 0; pass < 2; pass++) {
1555 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1556 assert(pass == 0);
1557 return;
1558 }
1559 }
1560 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1561 /* The request is used as the AIO opaque value, so add a ref. */
1562 scsi_req_ref(&r->req);
1563 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1564 BLOCK_ACCT_FLUSH);
1565 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1566 return;
1567 }
1568
1569 scsi_req_complete(&r->req, GOOD);
1570 return;
1571
1572 invalid_param:
1573 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1574 return;
1575
1576 invalid_param_len:
1577 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1578 return;
1579
1580 invalid_field:
1581 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1582 }
1583
1584 static inline bool check_lba_range(SCSIDiskState *s,
1585 uint64_t sector_num, uint32_t nb_sectors)
1586 {
1587 /*
1588 * The first line tests that no overflow happens when computing the last
1589 * sector. The second line tests that the last accessed sector is in
1590 * range.
1591 *
1592 * Careful, the computations should not underflow for nb_sectors == 0,
1593 * and a 0-block read to the first LBA beyond the end of device is
1594 * valid.
1595 */
1596 return (sector_num <= sector_num + nb_sectors &&
1597 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1598 }
1599
1600 typedef struct UnmapCBData {
1601 SCSIDiskReq *r;
1602 uint8_t *inbuf;
1603 int count;
1604 } UnmapCBData;
1605
1606 static void scsi_unmap_complete(void *opaque, int ret);
1607
1608 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1609 {
1610 SCSIDiskReq *r = data->r;
1611 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1612
1613 assert(r->req.aiocb == NULL);
1614
1615 if (data->count > 0) {
1616 r->sector = ldq_be_p(&data->inbuf[0])
1617 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1618 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL)
1619 * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1620 if (!check_lba_range(s, r->sector, r->sector_count)) {
1621 block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
1622 BLOCK_ACCT_UNMAP);
1623 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1624 goto done;
1625 }
1626
1627 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1628 r->sector_count * BDRV_SECTOR_SIZE,
1629 BLOCK_ACCT_UNMAP);
1630
1631 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1632 r->sector * BDRV_SECTOR_SIZE,
1633 r->sector_count * BDRV_SECTOR_SIZE,
1634 scsi_unmap_complete, data);
1635 data->count--;
1636 data->inbuf += 16;
1637 return;
1638 }
1639
1640 scsi_req_complete(&r->req, GOOD);
1641
1642 done:
1643 scsi_req_unref(&r->req);
1644 g_free(data);
1645 }
1646
1647 static void scsi_unmap_complete(void *opaque, int ret)
1648 {
1649 UnmapCBData *data = opaque;
1650 SCSIDiskReq *r = data->r;
1651 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1652
1653 assert(r->req.aiocb != NULL);
1654 r->req.aiocb = NULL;
1655
1656 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1657 if (scsi_disk_req_check_error(r, ret, true)) {
1658 scsi_req_unref(&r->req);
1659 g_free(data);
1660 } else {
1661 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1662 scsi_unmap_complete_noio(data, ret);
1663 }
1664 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1665 }
1666
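/* Parse the UNMAP parameter list and discard the listed extents one block
 * descriptor at a time via scsi_unmap_complete_noio. */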
1667 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1668 {
1669 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1670 uint8_t *p = inbuf;
1671 int len = r->req.cmd.xfer;
1672 UnmapCBData *data;
1673
1674 /* Reject ANCHOR=1. */
1675 if (r->req.cmd.buf[1] & 0x1) {
1676 goto invalid_field;
1677 }
1678
1679 if (len < 8) {
1680 goto invalid_param_len;
1681 }
1682 if (len < lduw_be_p(&p[0]) + 2) {
1683 goto invalid_param_len;
1684 }
1685 if (len < lduw_be_p(&p[2]) + 8) {
1686 goto invalid_param_len;
1687 }
1688 if (lduw_be_p(&p[2]) & 15) {
1689 goto invalid_param_len;
1690 }
1691
1692 if (blk_is_read_only(s->qdev.conf.blk)) {
1693 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1694 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1695 return;
1696 }
1697
1698 data = g_new0(UnmapCBData, 1);
1699 data->r = r;
1700 data->inbuf = &p[8];
1701 data->count = lduw_be_p(&p[2]) >> 4;
1702
1703 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1704 scsi_req_ref(&r->req);
1705 scsi_unmap_complete_noio(data, 0);
1706 return;
1707
1708 invalid_param_len:
1709 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1710 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1711 return;
1712
1713 invalid_field:
1714 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1715 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1716 }
1717
1718 typedef struct WriteSameCBData {
1719 SCSIDiskReq *r;
1720 int64_t sector;
1721 int nb_sectors;
1722 QEMUIOVector qiov;
1723 struct iovec iov;
1724 } WriteSameCBData;
1725
1726 static void scsi_write_same_complete(void *opaque, int ret)
1727 {
1728 WriteSameCBData *data = opaque;
1729 SCSIDiskReq *r = data->r;
1730 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1731
1732 assert(r->req.aiocb != NULL);
1733 r->req.aiocb = NULL;
1734 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1735 if (scsi_disk_req_check_error(r, ret, true)) {
1736 goto done;
1737 }
1738
1739 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1740
1741 data->nb_sectors -= data->iov.iov_len / 512;
1742 data->sector += data->iov.iov_len / 512;
1743 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1744 if (data->iov.iov_len) {
1745 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1746 data->iov.iov_len, BLOCK_ACCT_WRITE);
1747 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1748 * where final qiov may need smaller size */
1749 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1750 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1751 data->sector << BDRV_SECTOR_BITS,
1752 &data->qiov, 0,
1753 scsi_write_same_complete, data);
1754 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1755 return;
1756 }
1757
1758 scsi_req_complete(&r->req, GOOD);
1759
1760 done:
1761 scsi_req_unref(&r->req);
1762 qemu_vfree(data->iov.iov_base);
1763 g_free(data);
1764 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1765 }
1766
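/* Emulate WRITE SAME(10/16): zero patterns are handled with an efficient
 * write-zeroes operation (unmapping if the UNMAP bit is set); other patterns
 * are replicated into a bounce buffer and written out in chunks. */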
1767 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1768 {
1769 SCSIRequest *req = &r->req;
1770 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1771 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1772 WriteSameCBData *data;
1773 uint8_t *buf;
1774 int i;
1775
1776 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1777 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1778 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1779 return;
1780 }
1781
1782 if (blk_is_read_only(s->qdev.conf.blk)) {
1783 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1784 return;
1785 }
1786 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1787 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1788 return;
1789 }
1790
1791 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1792 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1793
1794 /* The request is used as the AIO opaque value, so add a ref. */
1795 scsi_req_ref(&r->req);
1796 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1797 nb_sectors * s->qdev.blocksize,
1798 BLOCK_ACCT_WRITE);
1799 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1800 r->req.cmd.lba * s->qdev.blocksize,
1801 nb_sectors * s->qdev.blocksize,
1802 flags, scsi_aio_complete, r);
1803 return;
1804 }
1805
1806 data = g_new0(WriteSameCBData, 1);
1807 data->r = r;
1808 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1809 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1810 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1811 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1812 data->iov.iov_len);
1813 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1814
1815 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1816 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1817 }
1818
1819 scsi_req_ref(&r->req);
1820 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1821 data->iov.iov_len, BLOCK_ACCT_WRITE);
1822 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1823 data->sector << BDRV_SECTOR_BITS,
1824 &data->qiov, 0,
1825 scsi_write_same_complete, data);
1826 }
1827
1828 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1829 {
1830 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1831
1832 if (r->iov.iov_len) {
1833 int buflen = r->iov.iov_len;
1834 trace_scsi_disk_emulate_write_data(buflen);
1835 r->iov.iov_len = 0;
1836 scsi_req_data(&r->req, buflen);
1837 return;
1838 }
1839
1840 switch (req->cmd.buf[0]) {
1841 case MODE_SELECT:
1842 case MODE_SELECT_10:
1843 /* This also clears the sense buffer for REQUEST SENSE. */
1844 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1845 break;
1846
1847 case UNMAP:
1848 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1849 break;
1850
1851 case VERIFY_10:
1852 case VERIFY_12:
1853 case VERIFY_16:
1854 if (r->req.status == -1) {
1855 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1856 }
1857 break;
1858
1859 case WRITE_SAME_10:
1860 case WRITE_SAME_16:
1861 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1862 break;
1863
1864 default:
1865 abort();
1866 }
1867 }
1868
1869 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1870 {
1871 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1872 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1873 uint64_t nb_sectors;
1874 uint8_t *outbuf;
1875 int buflen;
1876
1877 switch (req->cmd.buf[0]) {
1878 case INQUIRY:
1879 case MODE_SENSE:
1880 case MODE_SENSE_10:
1881 case RESERVE:
1882 case RESERVE_10:
1883 case RELEASE:
1884 case RELEASE_10:
1885 case START_STOP:
1886 case ALLOW_MEDIUM_REMOVAL:
1887 case GET_CONFIGURATION:
1888 case GET_EVENT_STATUS_NOTIFICATION:
1889 case MECHANISM_STATUS:
1890 case REQUEST_SENSE:
1891 break;
1892
1893 default:
1894 if (!blk_is_available(s->qdev.conf.blk)) {
1895 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1896 return 0;
1897 }
1898 break;
1899 }
1900
1901 /*
1902 * FIXME: we shouldn't return anything bigger than 4k, but the code
1903 * requires the buffer to be as big as req->cmd.xfer in several
1904 * places. So, do not allow CDBs with a very large ALLOCATION
1905 * LENGTH. The real fix would be to modify scsi_read_data and
1906 * dma_buf_read, so that they return data beyond the buflen
1907 * as all zeros.
1908 */
1909 if (req->cmd.xfer > 65536) {
1910 goto illegal_request;
1911 }
1912 r->buflen = MAX(4096, req->cmd.xfer);
1913
1914 if (!r->iov.iov_base) {
1915 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1916 }
1917
1918 buflen = req->cmd.xfer;
1919 outbuf = r->iov.iov_base;
1920 memset(outbuf, 0, r->buflen);
1921 switch (req->cmd.buf[0]) {
1922 case TEST_UNIT_READY:
1923 assert(blk_is_available(s->qdev.conf.blk));
1924 break;
1925 case INQUIRY:
1926 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1927 if (buflen < 0) {
1928 goto illegal_request;
1929 }
1930 break;
1931 case MODE_SENSE:
1932 case MODE_SENSE_10:
1933 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1934 if (buflen < 0) {
1935 goto illegal_request;
1936 }
1937 break;
1938 case READ_TOC:
1939 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1940 if (buflen < 0) {
1941 goto illegal_request;
1942 }
1943 break;
1944 case RESERVE:
1945 if (req->cmd.buf[1] & 1) {
1946 goto illegal_request;
1947 }
1948 break;
1949 case RESERVE_10:
1950 if (req->cmd.buf[1] & 3) {
1951 goto illegal_request;
1952 }
1953 break;
1954 case RELEASE:
1955 if (req->cmd.buf[1] & 1) {
1956 goto illegal_request;
1957 }
1958 break;
1959 case RELEASE_10:
1960 if (req->cmd.buf[1] & 3) {
1961 goto illegal_request;
1962 }
1963 break;
1964 case START_STOP:
1965 if (scsi_disk_emulate_start_stop(r) < 0) {
1966 return 0;
1967 }
1968 break;
1969 case ALLOW_MEDIUM_REMOVAL:
1970 s->tray_locked = req->cmd.buf[4] & 1;
1971 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1972 break;
1973 case READ_CAPACITY_10:
1974 /* The normal LEN field for this command is zero. */
1975 memset(outbuf, 0, 8);
1976 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1977 if (!nb_sectors) {
1978 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1979 return 0;
1980 }
1981 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1982 goto illegal_request;
1983 }
1984 nb_sectors /= s->qdev.blocksize / 512;
1985 /* Returned value is the address of the last sector. */
1986 nb_sectors--;
1987 /* Remember the new size for read/write sanity checking. */
1988 s->qdev.max_lba = nb_sectors;
1989 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1990 if (nb_sectors > UINT32_MAX) {
1991 nb_sectors = UINT32_MAX;
1992 }
1993 outbuf[0] = (nb_sectors >> 24) & 0xff;
1994 outbuf[1] = (nb_sectors >> 16) & 0xff;
1995 outbuf[2] = (nb_sectors >> 8) & 0xff;
1996 outbuf[3] = nb_sectors & 0xff;
1997 outbuf[4] = 0;
1998 outbuf[5] = 0;
1999 outbuf[6] = s->qdev.blocksize >> 8;
2000 outbuf[7] = 0;
2001 break;
2002 case REQUEST_SENSE:
2003 /* Just return "NO SENSE". */
2004 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
2005 (req->cmd.buf[1] & 1) == 0);
2006 if (buflen < 0) {
2007 goto illegal_request;
2008 }
2009 break;
2010 case MECHANISM_STATUS:
2011 buflen = scsi_emulate_mechanism_status(s, outbuf);
2012 if (buflen < 0) {
2013 goto illegal_request;
2014 }
2015 break;
2016 case GET_CONFIGURATION:
2017 buflen = scsi_get_configuration(s, outbuf);
2018 if (buflen < 0) {
2019 goto illegal_request;
2020 }
2021 break;
2022 case GET_EVENT_STATUS_NOTIFICATION:
2023 buflen = scsi_get_event_status_notification(s, r, outbuf);
2024 if (buflen < 0) {
2025 goto illegal_request;
2026 }
2027 break;
2028 case READ_DISC_INFORMATION:
2029 buflen = scsi_read_disc_information(s, r, outbuf);
2030 if (buflen < 0) {
2031 goto illegal_request;
2032 }
2033 break;
2034 case READ_DVD_STRUCTURE:
2035 buflen = scsi_read_dvd_structure(s, r, outbuf);
2036 if (buflen < 0) {
2037 goto illegal_request;
2038 }
2039 break;
2040 case SERVICE_ACTION_IN_16:
2041 /* Service Action In subcommands. */
2042 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2043 trace_scsi_disk_emulate_command_SAI_16();
2044 memset(outbuf, 0, req->cmd.xfer);
2045 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2046 if (!nb_sectors) {
2047 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2048 return 0;
2049 }
2050 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2051 goto illegal_request;
2052 }
2053 nb_sectors /= s->qdev.blocksize / 512;
2054 /* Returned value is the address of the last sector. */
2055 nb_sectors--;
2056 /* Remember the new size for read/write sanity checking. */
2057 s->qdev.max_lba = nb_sectors;
2058 outbuf[0] = (nb_sectors >> 56) & 0xff;
2059 outbuf[1] = (nb_sectors >> 48) & 0xff;
2060 outbuf[2] = (nb_sectors >> 40) & 0xff;
2061 outbuf[3] = (nb_sectors >> 32) & 0xff;
2062 outbuf[4] = (nb_sectors >> 24) & 0xff;
2063 outbuf[5] = (nb_sectors >> 16) & 0xff;
2064 outbuf[6] = (nb_sectors >> 8) & 0xff;
2065 outbuf[7] = nb_sectors & 0xff;
2066 outbuf[8] = 0;
2067 outbuf[9] = 0;
2068 outbuf[10] = s->qdev.blocksize >> 8;
2069 outbuf[11] = 0;
2070 outbuf[12] = 0;
2071 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2072
2073 /* set TPE bit if the format supports discard */
2074 if (s->qdev.conf.discard_granularity) {
2075 outbuf[14] = 0x80;
2076 }
2077
2078 /* Protection, exponent and lowest lba field left blank. */
2079 break;
2080 }
2081 trace_scsi_disk_emulate_command_SAI_unsupported();
2082 goto illegal_request;
2083 case SYNCHRONIZE_CACHE:
2084 /* The request is used as the AIO opaque value, so add a ref. */
2085 scsi_req_ref(&r->req);
2086 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2087 BLOCK_ACCT_FLUSH);
2088 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2089 return 0;
2090 case SEEK_10:
2091 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2092 if (r->req.cmd.lba > s->qdev.max_lba) {
2093 goto illegal_lba;
2094 }
2095 break;
2096 case MODE_SELECT:
2097 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2098 break;
2099 case MODE_SELECT_10:
2100 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2101 break;
2102 case UNMAP:
2103 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2104 break;
2105 case VERIFY_10:
2106 case VERIFY_12:
2107 case VERIFY_16:
2108 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2109 if (req->cmd.buf[1] & 6) {
2110 goto illegal_request;
2111 }
2112 break;
2113 case WRITE_SAME_10:
2114 case WRITE_SAME_16:
2115 trace_scsi_disk_emulate_command_WRITE_SAME(
2116 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2117 break;
2118 default:
2119 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2120 scsi_command_name(buf[0]));
2121 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2122 return 0;
2123 }
2124 assert(!r->req.aiocb);
2125 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2126 if (r->iov.iov_len == 0) {
2127 scsi_req_complete(&r->req, GOOD);
2128 }
2129 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2130 assert(r->iov.iov_len == req->cmd.xfer);
2131 return -r->iov.iov_len;
2132 } else {
2133 return r->iov.iov_len;
2134 }
2135
2136 illegal_request:
2137 if (r->req.status == -1) {
2138 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2139 }
2140 return 0;
2141
2142 illegal_lba:
2143 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2144 return 0;
2145 }
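/*
 * Illustrative sketch, not part of the device model: how an initiator would
 * decode the 8-byte READ CAPACITY (10) payload assembled above.  Bytes 0-3
 * hold the big-endian LBA of the last block and bytes 4-7 the big-endian
 * block length; a value of 0xffffffff in bytes 0-3 tells the initiator to
 * issue READ CAPACITY (16) instead.  The example_* name is made up for this
 * sketch only.
 */
static inline void example_decode_read_capacity_10(const uint8_t *buf,
                                                   uint64_t *last_lba,
                                                   uint32_t *block_len)
{
    *last_lba = ((uint64_t)buf[0] << 24) | ((uint64_t)buf[1] << 16) |
                ((uint64_t)buf[2] << 8) | buf[3];
    *block_len = ((uint32_t)buf[4] << 24) | ((uint32_t)buf[5] << 16) |
                 ((uint32_t)buf[6] << 8) | buf[7];
}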
2146
2147 /* Execute a SCSI command. Returns the length of the data expected by the
2148 command. This will be positive for data transfers from the device
2149 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2150 and zero if the command does not transfer any data. */
2151
2152 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2153 {
2154 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2155 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2156 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2157 uint32_t len;
2158 uint8_t command;
2159
2160 command = buf[0];
2161
2162 if (!blk_is_available(s->qdev.conf.blk)) {
2163 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2164 return 0;
2165 }
2166
2167 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2168 switch (command) {
2169 case READ_6:
2170 case READ_10:
2171 case READ_12:
2172 case READ_16:
2173 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2174 /* Protection information is not supported. For SCSI versions 2 and
2175 * older (as determined by snooping the guest's INQUIRY commands),
2176 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2177 */
2178 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2179 goto illegal_request;
2180 }
2181 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2182 goto illegal_lba;
2183 }
2184 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2185 r->sector_count = len * (s->qdev.blocksize / 512);
2186 break;
2187 case WRITE_6:
2188 case WRITE_10:
2189 case WRITE_12:
2190 case WRITE_16:
2191 case WRITE_VERIFY_10:
2192 case WRITE_VERIFY_12:
2193 case WRITE_VERIFY_16:
2194 if (blk_is_read_only(s->qdev.conf.blk)) {
2195 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2196 return 0;
2197 }
2198 trace_scsi_disk_dma_command_WRITE(
2199 (command & 0xe) == 0xe ? "And Verify " : "",
2200 r->req.cmd.lba, len);
2201 /* fall through */
2202 case VERIFY_10:
2203 case VERIFY_12:
2204 case VERIFY_16:
2205 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2206 * As far as DMA is concerned, we can treat it the same as a write;
2207 * scsi_block_do_sgio will send VERIFY commands.
2208 */
2209 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2210 goto illegal_request;
2211 }
2212 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2213 goto illegal_lba;
2214 }
2215 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2216 r->sector_count = len * (s->qdev.blocksize / 512);
2217 break;
2218 default:
2219 abort();
2220 illegal_request:
2221 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2222 return 0;
2223 illegal_lba:
2224 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2225 return 0;
2226 }
2227 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2228 if (r->sector_count == 0) {
2229 scsi_req_complete(&r->req, GOOD);
2230 }
2231 assert(r->iov.iov_len == 0);
2232 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2233 return -r->sector_count * 512;
2234 } else {
2235 return r->sector_count * 512;
2236 }
2237 }
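/*
 * Illustrative sketch, not part of the device model: the unit conversion used
 * by scsi_disk_dma_command() above.  CDB fields count logical blocks, while
 * SCSIDiskReq tracks 512-byte QEMU sectors, so LBA and length are both scaled
 * by blocksize / 512, and the resulting byte count is returned with a sign
 * that encodes the direction (negative for transfers to the device).  The
 * example_* name is made up for this sketch only; blocksize is assumed to be
 * a multiple of 512.
 */
static inline int64_t example_dma_xfer_bytes(uint32_t len, uint32_t blocksize,
                                             bool to_dev)
{
    /* e.g. len = 8 logical blocks of 4096 bytes -> 64 QEMU sectors */
    uint64_t sector_count = (uint64_t)len * (blocksize / 512);

    return to_dev ? -(int64_t)(sector_count * 512)
                  : (int64_t)(sector_count * 512);
}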
2238
2239 static void scsi_disk_reset(DeviceState *dev)
2240 {
2241 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2242 uint64_t nb_sectors;
2243
2244 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2245
2246 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2247 nb_sectors /= s->qdev.blocksize / 512;
2248 if (nb_sectors) {
2249 nb_sectors--;
2250 }
2251 s->qdev.max_lba = nb_sectors;
2252 /* reset tray statuses */
2253 s->tray_locked = 0;
2254 s->tray_open = 0;
2255
2256 s->qdev.scsi_version = s->qdev.default_scsi_version;
2257 }
2258
2259 static void scsi_disk_resize_cb(void *opaque)
2260 {
2261 SCSIDiskState *s = opaque;
2262
2263 /* SPC lists this sense code as available only for
2264 * direct-access devices.
2265 */
2266 if (s->qdev.type == TYPE_DISK) {
2267 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2268 }
2269 }
2270
2271 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2272 {
2273 SCSIDiskState *s = opaque;
2274
2275 /*
2276 * When a CD gets changed, we have to report an ejected state and
2277 * then a loaded state to guests so that they detect tray
2278 * open/close and media change events. Guests that do not use
2279 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2280 * states rely on this behavior.
2281 *
2282 * media_changed governs the state machine used for unit attention
2283 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2284 */
2285 s->media_changed = load;
2286 s->tray_open = !load;
2287 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2288 s->media_event = true;
2289 s->eject_request = false;
2290 }
2291
2292 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2293 {
2294 SCSIDiskState *s = opaque;
2295
2296 s->eject_request = true;
2297 if (force) {
2298 s->tray_locked = false;
2299 }
2300 }
2301
2302 static bool scsi_cd_is_tray_open(void *opaque)
2303 {
2304 return ((SCSIDiskState *)opaque)->tray_open;
2305 }
2306
2307 static bool scsi_cd_is_medium_locked(void *opaque)
2308 {
2309 return ((SCSIDiskState *)opaque)->tray_locked;
2310 }
2311
2312 static const BlockDevOps scsi_disk_removable_block_ops = {
2313 .change_media_cb = scsi_cd_change_media_cb,
2314 .eject_request_cb = scsi_cd_eject_request_cb,
2315 .is_tray_open = scsi_cd_is_tray_open,
2316 .is_medium_locked = scsi_cd_is_medium_locked,
2317
2318 .resize_cb = scsi_disk_resize_cb,
2319 };
2320
2321 static const BlockDevOps scsi_disk_block_ops = {
2322 .resize_cb = scsi_disk_resize_cb,
2323 };
2324
2325 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2326 {
2327 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2328 if (s->media_changed) {
2329 s->media_changed = false;
2330 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2331 }
2332 }
2333
2334 static void scsi_realize(SCSIDevice *dev, Error **errp)
2335 {
2336 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2337 bool read_only;
2338
2339 if (!s->qdev.conf.blk) {
2340 error_setg(errp, "drive property not set");
2341 return;
2342 }
2343
2344 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2345 !blk_is_inserted(s->qdev.conf.blk)) {
2346 error_setg(errp, "Device needs media, but drive is empty");
2347 return;
2348 }
2349
2350 blkconf_blocksizes(&s->qdev.conf);
2351
2352 if (s->qdev.conf.logical_block_size >
2353 s->qdev.conf.physical_block_size) {
2354 error_setg(errp,
2355 "logical_block_size > physical_block_size not supported");
2356 return;
2357 }
2358
2359 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2360 !s->qdev.hba_supports_iothread)
2361 {
2362 error_setg(errp, "HBA does not support iothreads");
2363 return;
2364 }
2365
2366 if (dev->type == TYPE_DISK) {
2367 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2368 return;
2369 }
2370 }
2371
2372 read_only = blk_is_read_only(s->qdev.conf.blk);
2373 if (dev->type == TYPE_ROM) {
2374 read_only = true;
2375 }
2376
2377 if (!blkconf_apply_backend_options(&dev->conf, read_only,
2378 dev->type == TYPE_DISK, errp)) {
2379 return;
2380 }
2381
2382 if (s->qdev.conf.discard_granularity == -1) {
2383 s->qdev.conf.discard_granularity =
2384 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2385 }
2386
2387 if (!s->version) {
2388 s->version = g_strdup(qemu_hw_version());
2389 }
2390 if (!s->vendor) {
2391 s->vendor = g_strdup("QEMU");
2392 }
2393 if (!s->device_id) {
2394 if (s->serial) {
2395 s->device_id = g_strdup_printf("%.20s", s->serial);
2396 } else {
2397 const char *str = blk_name(s->qdev.conf.blk);
2398 if (str && *str) {
2399 s->device_id = g_strdup(str);
2400 }
2401 }
2402 }
2403
2404 if (blk_is_sg(s->qdev.conf.blk)) {
2405 error_setg(errp, "unwanted /dev/sg*");
2406 return;
2407 }
2408
2409 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2410 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2411 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2412 } else {
2413 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2414 }
2415 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2416
2417 blk_iostatus_enable(s->qdev.conf.blk);
2418
2419 add_boot_device_lchs(&dev->qdev, NULL,
2420 dev->conf.lcyls,
2421 dev->conf.lheads,
2422 dev->conf.lsecs);
2423 }
2424
2425 static void scsi_unrealize(SCSIDevice *dev, Error **errp)
2426 {
2427 del_boot_device_lchs(&dev->qdev, NULL);
2428 }
2429
2430 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2431 {
2432 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2433 AioContext *ctx = NULL;
2434 /* This can happen for devices without a drive. The error message for the
2435 * missing backend will be issued in scsi_realize.
2436 */
2437 if (s->qdev.conf.blk) {
2438 ctx = blk_get_aio_context(s->qdev.conf.blk);
2439 aio_context_acquire(ctx);
2440 blkconf_blocksizes(&s->qdev.conf);
2441 }
2442 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2443 s->qdev.type = TYPE_DISK;
2444 if (!s->product) {
2445 s->product = g_strdup("QEMU HARDDISK");
2446 }
2447 scsi_realize(&s->qdev, errp);
2448 if (ctx) {
2449 aio_context_release(ctx);
2450 }
2451 }
2452
2453 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2454 {
2455 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2456 AioContext *ctx;
2457 int ret;
2458
2459 if (!dev->conf.blk) {
2460 /* Anonymous BlockBackend for an empty drive. As we put it into
2461 * dev->conf, qdev takes care of detaching on unplug. */
2462 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
2463 ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2464 assert(ret == 0);
2465 }
2466
2467 ctx = blk_get_aio_context(dev->conf.blk);
2468 aio_context_acquire(ctx);
2469 s->qdev.blocksize = 2048;
2470 s->qdev.type = TYPE_ROM;
2471 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2472 if (!s->product) {
2473 s->product = g_strdup("QEMU CD-ROM");
2474 }
2475 scsi_realize(&s->qdev, errp);
2476 aio_context_release(ctx);
2477 }
2478
2479 static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
2480 {
2481 DriveInfo *dinfo;
2482 Error *local_err = NULL;
2483
2484 warn_report("'scsi-disk' is deprecated, "
2485 "please use 'scsi-hd' or 'scsi-cd' instead");
2486
2487 if (!dev->conf.blk) {
2488 scsi_realize(dev, &local_err);
2489 assert(local_err);
2490 error_propagate(errp, local_err);
2491 return;
2492 }
2493
2494 dinfo = blk_legacy_dinfo(dev->conf.blk);
2495 if (dinfo && dinfo->media_cd) {
2496 scsi_cd_realize(dev, errp);
2497 } else {
2498 scsi_hd_realize(dev, errp);
2499 }
2500 }
2501
2502 static const SCSIReqOps scsi_disk_emulate_reqops = {
2503 .size = sizeof(SCSIDiskReq),
2504 .free_req = scsi_free_request,
2505 .send_command = scsi_disk_emulate_command,
2506 .read_data = scsi_disk_emulate_read_data,
2507 .write_data = scsi_disk_emulate_write_data,
2508 .get_buf = scsi_get_buf,
2509 };
2510
2511 static const SCSIReqOps scsi_disk_dma_reqops = {
2512 .size = sizeof(SCSIDiskReq),
2513 .free_req = scsi_free_request,
2514 .send_command = scsi_disk_dma_command,
2515 .read_data = scsi_read_data,
2516 .write_data = scsi_write_data,
2517 .get_buf = scsi_get_buf,
2518 .load_request = scsi_disk_load_request,
2519 .save_request = scsi_disk_save_request,
2520 };
2521
2522 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2523 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
2524 [INQUIRY] = &scsi_disk_emulate_reqops,
2525 [MODE_SENSE] = &scsi_disk_emulate_reqops,
2526 [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
2527 [START_STOP] = &scsi_disk_emulate_reqops,
2528 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
2529 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
2530 [READ_TOC] = &scsi_disk_emulate_reqops,
2531 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
2532 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
2533 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
2534 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
2535 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
2536 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
2537 [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
2538 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
2539 [SEEK_10] = &scsi_disk_emulate_reqops,
2540 [MODE_SELECT] = &scsi_disk_emulate_reqops,
2541 [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
2542 [UNMAP] = &scsi_disk_emulate_reqops,
2543 [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
2544 [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
2545 [VERIFY_10] = &scsi_disk_emulate_reqops,
2546 [VERIFY_12] = &scsi_disk_emulate_reqops,
2547 [VERIFY_16] = &scsi_disk_emulate_reqops,
2548
2549 [READ_6] = &scsi_disk_dma_reqops,
2550 [READ_10] = &scsi_disk_dma_reqops,
2551 [READ_12] = &scsi_disk_dma_reqops,
2552 [READ_16] = &scsi_disk_dma_reqops,
2553 [WRITE_6] = &scsi_disk_dma_reqops,
2554 [WRITE_10] = &scsi_disk_dma_reqops,
2555 [WRITE_12] = &scsi_disk_dma_reqops,
2556 [WRITE_16] = &scsi_disk_dma_reqops,
2557 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
2558 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
2559 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
2560 };
2561
2562 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2563 {
2564 int i;
2565 int len = scsi_cdb_length(buf);
2566 char *line_buffer, *p;
2567
2568 line_buffer = g_malloc(len * 5 + 1);
2569
2570 for (i = 0, p = line_buffer; i < len; i++) {
2571 p += sprintf(p, " 0x%02x", buf[i]);
2572 }
2573 trace_scsi_disk_new_request(lun, tag, line_buffer);
2574
2575 g_free(line_buffer);
2576 }
2577
2578 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2579 uint8_t *buf, void *hba_private)
2580 {
2581 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2582 SCSIRequest *req;
2583 const SCSIReqOps *ops;
2584 uint8_t command;
2585
2586 command = buf[0];
2587 ops = scsi_disk_reqops_dispatch[command];
2588 if (!ops) {
2589 ops = &scsi_disk_emulate_reqops;
2590 }
2591 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2592
2593 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2594 scsi_disk_new_request_dump(lun, tag, buf);
2595 }
2596
2597 return req;
2598 }
2599
2600 #ifdef __linux__
2601 static int get_device_type(SCSIDiskState *s)
2602 {
2603 uint8_t cmd[16];
2604 uint8_t buf[36];
2605 int ret;
2606
2607 memset(cmd, 0, sizeof(cmd));
2608 memset(buf, 0, sizeof(buf));
2609 cmd[0] = INQUIRY;
2610 cmd[4] = sizeof(buf);
2611
2612 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2613 buf, sizeof(buf));
2614 if (ret < 0) {
2615 return -1;
2616 }
2617 s->qdev.type = buf[0];
2618 if (buf[1] & 0x80) {
2619 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2620 }
2621 return 0;
2622 }
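/*
 * Illustrative sketch, not part of the device model: the two standard INQUIRY
 * fields that get_device_type() relies on.  Per SPC, byte 0 bits 4:0 carry
 * the peripheral device type (0x00 disk, 0x05 CD/DVD, ...) and byte 1 bit 7
 * is the RMB (removable medium) flag; the code above stores byte 0 directly,
 * which equals the type when the peripheral qualifier in bits 7:5 is zero.
 * The example_* name is made up for this sketch only.
 */
static inline void example_parse_std_inquiry(const uint8_t *inq,
                                             uint8_t *type, bool *removable)
{
    *type = inq[0] & 0x1f;
    *removable = (inq[1] & 0x80) != 0;
}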
2623
2624 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2625 {
2626 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2627 AioContext *ctx;
2628 int sg_version;
2629 int rc;
2630
2631 if (!s->qdev.conf.blk) {
2632 error_setg(errp, "drive property not set");
2633 return;
2634 }
2635
2636 if (s->rotation_rate) {
2637 error_report_once("rotation_rate is specified for scsi-block but is "
2638 "not implemented. This option is deprecated and will "
2639 "be removed in a future version");
2640 }
2641
2642 ctx = blk_get_aio_context(s->qdev.conf.blk);
2643 aio_context_acquire(ctx);
2644
2645 /* Check that we are using a driver that supports SG_IO (version 3 and later) */
2646 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2647 if (rc < 0) {
2648 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2649 if (rc != -EPERM) {
2650 error_append_hint(errp, "Is this a SCSI device?\n");
2651 }
2652 goto out;
2653 }
2654 if (sg_version < 30000) {
2655 error_setg(errp, "scsi generic interface too old");
2656 goto out;
2657 }
2658
2659 /* get device type from INQUIRY data */
2660 rc = get_device_type(s);
2661 if (rc < 0) {
2662 error_setg(errp, "INQUIRY failed");
2663 goto out;
2664 }
2665
2666 /* Make a guess at the block size; we'll fix it up when the guest sends
2667 * READ CAPACITY. If it doesn't, it would likely assume these sizes
2668 * anyway. (TODO: check in /sys).
2669 */
2670 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2671 s->qdev.blocksize = 2048;
2672 } else {
2673 s->qdev.blocksize = 512;
2674 }
2675
2676 /* Prevent the scsi-block device from being ejected via the HMP and QMP
2677 * 'eject' commands.
2678 */
2679 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2680
2681 scsi_realize(&s->qdev, errp);
2682 scsi_generic_read_device_inquiry(&s->qdev);
2683
2684 out:
2685 aio_context_release(ctx);
2686 }
2687
2688 typedef struct SCSIBlockReq {
2689 SCSIDiskReq req;
2690 sg_io_hdr_t io_header;
2691
2692 /* Selected bytes of the original CDB, copied into our own CDB. */
2693 uint8_t cmd, cdb1, group_number;
2694
2695 /* CDB passed to SG_IO. */
2696 uint8_t cdb[16];
2697 } SCSIBlockReq;
2698
2699 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2700 int64_t offset, QEMUIOVector *iov,
2701 int direction,
2702 BlockCompletionFunc *cb, void *opaque)
2703 {
2704 sg_io_hdr_t *io_header = &req->io_header;
2705 SCSIDiskReq *r = &req->req;
2706 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2707 int nb_logical_blocks;
2708 uint64_t lba;
2709 BlockAIOCB *aiocb;
2710
2711 /* This is not supported yet. It can only happen if the guest does
2712 * reads and writes that are not aligned to a single logical sector
2713 * _and_ cover multiple MemoryRegions.
2714 */
2715 assert(offset % s->qdev.blocksize == 0);
2716 assert(iov->size % s->qdev.blocksize == 0);
2717
2718 io_header->interface_id = 'S';
2719
2720 /* The data transfer comes from the QEMUIOVector. */
2721 io_header->dxfer_direction = direction;
2722 io_header->dxfer_len = iov->size;
2723 io_header->dxferp = (void *)iov->iov;
2724 io_header->iovec_count = iov->niov;
2725 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2726
2727 /* Build a new CDB with the LBA and length patched in, in case
2728 * the DMA helpers split the transfer into multiple segments. Do not
2729 * build a CDB smaller than what the guest wanted, and only build
2730 * a larger one if strictly necessary.
2731 */
2732 io_header->cmdp = req->cdb;
2733 lba = offset / s->qdev.blocksize;
2734 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2735
2736 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2737 /* 6-byte CDB */
2738 stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2739 req->cdb[4] = nb_logical_blocks;
2740 req->cdb[5] = 0;
2741 io_header->cmd_len = 6;
2742 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2743 /* 10-byte CDB */
2744 req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2745 req->cdb[1] = req->cdb1;
2746 stl_be_p(&req->cdb[2], lba);
2747 req->cdb[6] = req->group_number;
2748 stw_be_p(&req->cdb[7], nb_logical_blocks);
2749 req->cdb[9] = 0;
2750 io_header->cmd_len = 10;
2751 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2752 /* 12-byte CDB */
2753 req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2754 req->cdb[1] = req->cdb1;
2755 stl_be_p(&req->cdb[2], lba);
2756 stl_be_p(&req->cdb[6], nb_logical_blocks);
2757 req->cdb[10] = req->group_number;
2758 req->cdb[11] = 0;
2759 io_header->cmd_len = 12;
2760 } else {
2761 /* 16-byte CDB */
2762 req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2763 req->cdb[1] = req->cdb1;
2764 stq_be_p(&req->cdb[2], lba);
2765 stl_be_p(&req->cdb[10], nb_logical_blocks);
2766 req->cdb[14] = req->group_number;
2767 req->cdb[15] = 0;
2768 io_header->cmd_len = 16;
2769 }
2770
2771 /* The rest is as in scsi-generic.c. */
2772 io_header->mx_sb_len = sizeof(r->req.sense);
2773 io_header->sbp = r->req.sense;
2774 io_header->timeout = UINT_MAX;
2775 io_header->usr_ptr = r;
2776 io_header->flags |= SG_FLAG_DIRECT_IO;
2777
2778 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
2779 assert(aiocb != NULL);
2780 return aiocb;
2781 }
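/*
 * Illustrative sketch, not part of the device model: the CDB size selection
 * performed by scsi_block_do_sgio() above.  The opcode's group code
 * (bits 7:5) constrains which format the guest originally used, and the LBA
 * decides whether a larger CDB must be built.  The example_* name is made up
 * for this sketch only.
 */
static inline int example_sgio_cdb_len(uint8_t opcode, uint64_t lba)
{
    uint8_t group = opcode >> 5;

    if (group == 0 && lba <= 0x1ffff) {
        return 6;                           /* 6-byte CDB */
    } else if (group <= 1 && lba <= 0xffffffffULL) {
        return 10;                          /* 10-byte CDB */
    } else if (group != 4 && lba <= 0xffffffffULL) {
        return 12;                          /* 12-byte CDB */
    } else {
        return 16;                          /* 16-byte CDB */
    }
}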
2782
2783 static bool scsi_block_no_fua(SCSICommand *cmd)
2784 {
2785 return false;
2786 }
2787
2788 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2789 QEMUIOVector *iov,
2790 BlockCompletionFunc *cb, void *cb_opaque,
2791 void *opaque)
2792 {
2793 SCSIBlockReq *r = opaque;
2794 return scsi_block_do_sgio(r, offset, iov,
2795 SG_DXFER_FROM_DEV, cb, cb_opaque);
2796 }
2797
2798 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2799 QEMUIOVector *iov,
2800 BlockCompletionFunc *cb, void *cb_opaque,
2801 void *opaque)
2802 {
2803 SCSIBlockReq *r = opaque;
2804 return scsi_block_do_sgio(r, offset, iov,
2805 SG_DXFER_TO_DEV, cb, cb_opaque);
2806 }
2807
2808 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2809 {
2810 switch (buf[0]) {
2811 case VERIFY_10:
2812 case VERIFY_12:
2813 case VERIFY_16:
2814 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2815 * for the number of logical blocks specified in the length
2816 * field). For other modes, do not use a scatter/gather operation.
2817 */
2818 if ((buf[1] & 6) == 2) {
2819 return false;
2820 }
2821 break;
2822
2823 case READ_6:
2824 case READ_10:
2825 case READ_12:
2826 case READ_16:
2827 case WRITE_6:
2828 case WRITE_10:
2829 case WRITE_12:
2830 case WRITE_16:
2831 case WRITE_VERIFY_10:
2832 case WRITE_VERIFY_12:
2833 case WRITE_VERIFY_16:
2834 /* MMC writing cannot be done via DMA helpers, because it sometimes
2835 * involves writing beyond the maximum LBA or to a negative LBA (lead-in).
2836 * We might use scsi_block_dma_reqops as long as no writing commands are
2837 * seen, but performance usually isn't paramount on optical media. So,
2838 * just make scsi-block operate the same as scsi-generic for them.
2839 */
2840 if (s->qdev.type != TYPE_ROM) {
2841 return false;
2842 }
2843 break;
2844
2845 default:
2846 break;
2847 }
2848
2849 return true;
2850 }
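/*
 * Illustrative sketch, not part of the device model: the BYTCHK test used by
 * scsi_block_is_passthrough() above for VERIFY commands.  BYTCHK occupies
 * bits 2:1 of CDB byte 1, so (buf[1] & 6) == 2 selects BYTCHK == 01b, the
 * only mode that carries a data-out buffer and can therefore use the DMA
 * path.  The example_* name is made up for this sketch only.
 */
static inline bool example_verify_bytchk_is_01(const uint8_t *cdb)
{
    return ((cdb[1] >> 1) & 3) == 1;    /* same as (cdb[1] & 6) == 2 */
}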
2851
2852
2853 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2854 {
2855 SCSIBlockReq *r = (SCSIBlockReq *)req;
2856 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2857
2858 r->cmd = req->cmd.buf[0];
2859 switch (r->cmd >> 5) {
2860 case 0:
2861 /* 6-byte CDB. */
2862 r->cdb1 = r->group_number = 0;
2863 break;
2864 case 1:
2865 /* 10-byte CDB. */
2866 r->cdb1 = req->cmd.buf[1];
2867 r->group_number = req->cmd.buf[6];
2868 break;
2869 case 4:
2870 /* 12-byte CDB. */
2871 r->cdb1 = req->cmd.buf[1];
2872 r->group_number = req->cmd.buf[10];
2873 break;
2874 case 5:
2875 /* 16-byte CDB. */
2876 r->cdb1 = req->cmd.buf[1];
2877 r->group_number = req->cmd.buf[14];
2878 break;
2879 default:
2880 abort();
2881 }
2882
2883 /* Protection information is not supported. For SCSI versions 2 and
2884 * older (as determined by snooping the guest's INQUIRY commands),
2885 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2886 */
2887 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
2888 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
2889 return 0;
2890 }
2891
2892 r->req.status = &r->io_header.status;
2893 return scsi_disk_dma_command(req, buf);
2894 }
2895
2896 static const SCSIReqOps scsi_block_dma_reqops = {
2897 .size = sizeof(SCSIBlockReq),
2898 .free_req = scsi_free_request,
2899 .send_command = scsi_block_dma_command,
2900 .read_data = scsi_read_data,
2901 .write_data = scsi_write_data,
2902 .get_buf = scsi_get_buf,
2903 .load_request = scsi_disk_load_request,
2904 .save_request = scsi_disk_save_request,
2905 };
2906
2907 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
2908 uint32_t lun, uint8_t *buf,
2909 void *hba_private)
2910 {
2911 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2912
2913 if (scsi_block_is_passthrough(s, buf)) {
2914 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
2915 hba_private);
2916 } else {
2917 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
2918 hba_private);
2919 }
2920 }
2921
2922 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
2923 uint8_t *buf, void *hba_private)
2924 {
2925 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2926
2927 if (scsi_block_is_passthrough(s, buf)) {
2928 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
2929 } else {
2930 return scsi_req_parse_cdb(&s->qdev, cmd, buf);
2931 }
2932 }
2933
2934 static void scsi_block_update_sense(SCSIRequest *req)
2935 {
2936 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2937 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
2938 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
2939 }
2940 #endif
2941
2942 static
2943 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
2944 BlockCompletionFunc *cb, void *cb_opaque,
2945 void *opaque)
2946 {
2947 SCSIDiskReq *r = opaque;
2948 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2949 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2950 }
2951
2952 static
2953 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
2954 BlockCompletionFunc *cb, void *cb_opaque,
2955 void *opaque)
2956 {
2957 SCSIDiskReq *r = opaque;
2958 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2959 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
2960 }
2961
2962 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
2963 {
2964 DeviceClass *dc = DEVICE_CLASS(klass);
2965 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
2966
2967 dc->fw_name = "disk";
2968 dc->reset = scsi_disk_reset;
2969 sdc->dma_readv = scsi_dma_readv;
2970 sdc->dma_writev = scsi_dma_writev;
2971 sdc->need_fua_emulation = scsi_is_cmd_fua;
2972 }
2973
2974 static const TypeInfo scsi_disk_base_info = {
2975 .name = TYPE_SCSI_DISK_BASE,
2976 .parent = TYPE_SCSI_DEVICE,
2977 .class_init = scsi_disk_base_class_initfn,
2978 .instance_size = sizeof(SCSIDiskState),
2979 .class_size = sizeof(SCSIDiskClass),
2980 .abstract = true,
2981 };
2982
2983 #define DEFINE_SCSI_DISK_PROPERTIES() \
2984 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
2985 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \
2986 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
2987 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
2988 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
2989 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
2990 DEFINE_PROP_STRING("product", SCSIDiskState, product), \
2991 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
2992
2993
2994 static Property scsi_hd_properties[] = {
2995 DEFINE_SCSI_DISK_PROPERTIES(),
2996 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
2997 SCSI_DISK_F_REMOVABLE, false),
2998 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
2999 SCSI_DISK_F_DPOFUA, false),
3000 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3001 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3002 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3003 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3004 DEFAULT_MAX_UNMAP_SIZE),
3005 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3006 DEFAULT_MAX_IO_SIZE),
3007 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3008 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3009 5),
3010 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
3011 DEFINE_PROP_END_OF_LIST(),
3012 };
3013
3014 static const VMStateDescription vmstate_scsi_disk_state = {
3015 .name = "scsi-disk",
3016 .version_id = 1,
3017 .minimum_version_id = 1,
3018 .fields = (VMStateField[]) {
3019 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
3020 VMSTATE_BOOL(media_changed, SCSIDiskState),
3021 VMSTATE_BOOL(media_event, SCSIDiskState),
3022 VMSTATE_BOOL(eject_request, SCSIDiskState),
3023 VMSTATE_BOOL(tray_open, SCSIDiskState),
3024 VMSTATE_BOOL(tray_locked, SCSIDiskState),
3025 VMSTATE_END_OF_LIST()
3026 }
3027 };
3028
3029 static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
3030 {
3031 DeviceClass *dc = DEVICE_CLASS(klass);
3032 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3033
3034 sc->realize = scsi_hd_realize;
3035 sc->unrealize = scsi_unrealize;
3036 sc->alloc_req = scsi_new_request;
3037 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3038 dc->desc = "virtual SCSI disk";
3039 device_class_set_props(dc, scsi_hd_properties);
3040 dc->vmsd = &vmstate_scsi_disk_state;
3041 }
3042
3043 static const TypeInfo scsi_hd_info = {
3044 .name = "scsi-hd",
3045 .parent = TYPE_SCSI_DISK_BASE,
3046 .class_init = scsi_hd_class_initfn,
3047 };
3048
3049 static Property scsi_cd_properties[] = {
3050 DEFINE_SCSI_DISK_PROPERTIES(),
3051 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3052 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3053 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3054 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3055 DEFAULT_MAX_IO_SIZE),
3056 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3057 5),
3058 DEFINE_PROP_END_OF_LIST(),
3059 };
3060
3061 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3062 {
3063 DeviceClass *dc = DEVICE_CLASS(klass);
3064 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3065
3066 sc->realize = scsi_cd_realize;
3067 sc->alloc_req = scsi_new_request;
3068 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3069 dc->desc = "virtual SCSI CD-ROM";
3070 device_class_set_props(dc, scsi_cd_properties);
3071 dc->vmsd = &vmstate_scsi_disk_state;
3072 }
3073
3074 static const TypeInfo scsi_cd_info = {
3075 .name = "scsi-cd",
3076 .parent = TYPE_SCSI_DISK_BASE,
3077 .class_init = scsi_cd_class_initfn,
3078 };
3079
3080 #ifdef __linux__
3081 static Property scsi_block_properties[] = {
3082 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
3083 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
3084 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
3085 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3086 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3087 DEFAULT_MAX_UNMAP_SIZE),
3088 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3089 DEFAULT_MAX_IO_SIZE),
3090 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3091 -1),
3092 DEFINE_PROP_END_OF_LIST(),
3093 };
3094
3095 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3096 {
3097 DeviceClass *dc = DEVICE_CLASS(klass);
3098 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3099 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3100
3101 sc->realize = scsi_block_realize;
3102 sc->alloc_req = scsi_block_new_request;
3103 sc->parse_cdb = scsi_block_parse_cdb;
3104 sdc->dma_readv = scsi_block_dma_readv;
3105 sdc->dma_writev = scsi_block_dma_writev;
3106 sdc->update_sense = scsi_block_update_sense;
3107 sdc->need_fua_emulation = scsi_block_no_fua;
3108 dc->desc = "SCSI block device passthrough";
3109 device_class_set_props(dc, scsi_block_properties);
3110 dc->vmsd = &vmstate_scsi_disk_state;
3111 }
3112
3113 static const TypeInfo scsi_block_info = {
3114 .name = "scsi-block",
3115 .parent = TYPE_SCSI_DISK_BASE,
3116 .class_init = scsi_block_class_initfn,
3117 };
3118 #endif
3119
3120 static Property scsi_disk_properties[] = {
3121 DEFINE_SCSI_DISK_PROPERTIES(),
3122 DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3123 SCSI_DISK_F_REMOVABLE, false),
3124 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3125 SCSI_DISK_F_DPOFUA, false),
3126 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3127 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3128 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3129 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3130 DEFAULT_MAX_UNMAP_SIZE),
3131 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3132 DEFAULT_MAX_IO_SIZE),
3133 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3134 5),
3135 DEFINE_PROP_END_OF_LIST(),
3136 };
3137
3138 static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
3139 {
3140 DeviceClass *dc = DEVICE_CLASS(klass);
3141 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3142
3143 sc->realize = scsi_disk_realize;
3144 sc->alloc_req = scsi_new_request;
3145 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3146 dc->fw_name = "disk";
3147 dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
3148 dc->reset = scsi_disk_reset;
3149 device_class_set_props(dc, scsi_disk_properties);
3150 dc->vmsd = &vmstate_scsi_disk_state;
3151 }
3152
3153 static const TypeInfo scsi_disk_info = {
3154 .name = "scsi-disk",
3155 .parent = TYPE_SCSI_DISK_BASE,
3156 .class_init = scsi_disk_class_initfn,
3157 };
3158
3159 static void scsi_disk_register_types(void)
3160 {
3161 type_register_static(&scsi_disk_base_info);
3162 type_register_static(&scsi_hd_info);
3163 type_register_static(&scsi_cd_info);
3164 #ifdef __linux__
3165 type_register_static(&scsi_block_info);
3166 #endif
3167 type_register_static(&scsi_disk_info);
3168 }
3169
3170 type_init(scsi_disk_register_types)