]> git.proxmox.com Git - mirror_qemu.git/blob - hw/scsi/scsi-disk.c
scsi: protect req->aiocb with AioContext lock
[mirror_qemu.git] / hw / scsi / scsi-disk.c
1 /*
2 * SCSI Device emulation
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
6 *
7 * Written by Paul Brook
8 * Modifications:
 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of CDB is smaller
11 * than 36.
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
14 *
15 * This code is licensed under the LGPL.
16 *
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
20 */
21
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
43 #include "trace.h"
44 #include "qom/object.h"
45
46 #ifdef __linux
47 #include <scsi/sg.h>
48 #endif
49
50 #define SCSI_WRITE_SAME_MAX (512 * KiB)
51 #define SCSI_DMA_BUF_SIZE (128 * KiB)
52 #define SCSI_MAX_INQUIRY_LEN 256
53 #define SCSI_MAX_MODE_LEN 256
54
55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
56 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
57 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
58
59 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
60
61 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
62
struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /* Issue a vectored read/write; subclasses supply the implementation. */
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    /* Whether FUA semantics for @cmd must be emulated with an extra flush. */
    bool (*need_fua_emulation)(SCSICommand *cmd);
    /*
     * Refresh the request's sense data; invoked from scsi_handle_rw_error()
     * when a request completed with CHECK_CONDITION and valid sense.
     */
    void (*update_sense)(SCSIRequest *r);
};
70
/* Per-request state for an emulated disk command. */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;            /* next sector to transfer */
    uint32_t sector_count;      /* sectors still left to transfer */
    uint32_t buflen;            /* size of the allocated bounce buffer */
    bool started;               /* first data transfer has been issued */
    bool need_fua_emulation;    /* emulate FUA by flushing after the write */
    struct iovec iov;           /* bounce buffer for non-scatter/gather I/O */
    QEMUIOVector qiov;          /* single-element qiov wrapping @iov */
    BlockAcctCookie acct;       /* block-layer accounting cookie */
} SCSIDiskReq;
83
84 #define SCSI_DISK_F_REMOVABLE 0
85 #define SCSI_DISK_F_DPOFUA 1
86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
87
/* Device state for an emulated SCSI disk/CD-ROM. */
struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;          /* bitmask of SCSI_DISK_F_* flags */
    bool media_changed;
    bool media_event;           /* pending MEC_NEW_MEDIA event (GESN) */
    bool eject_request;         /* pending MEC_EJECT_REQUESTED event (GESN) */
    uint16_t port_index;        /* reported in VPD page 0x83 when nonzero */
    uint64_t max_unmap_size;    /* reported in VPD page 0xb0 (block limits) */
    uint64_t max_io_size;       /* reported in VPD page 0xb0 (block limits) */
    uint32_t quirks;
    QEMUBH *bh;
    char *version;              /* INQUIRY product revision (4 bytes used) */
    char *serial;               /* VPD page 0x80 unit serial number */
    char *vendor;               /* INQUIRY vendor identification */
    char *product;              /* INQUIRY product identification */
    char *device_id;            /* VPD page 0x83 ASCII designator */
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xffe  - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};
115
116 static void scsi_free_request(SCSIRequest *req)
117 {
118 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
119
120 qemu_vfree(r->iov.iov_base);
121 }
122
/*
 * Helper function for command completion with sense.  Builds the sense
 * data in the request and completes it with CHECK_CONDITION status.
 */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}
131
132 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
133 {
134 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
135
136 if (!r->iov.iov_base) {
137 r->buflen = size;
138 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
139 }
140 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
141 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
142 }
143
/*
 * Migration: serialize per-request state.  The layout written here must
 * stay in lockstep with scsi_disk_load_request().
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            /* Write direction: save the guest data pending in the buffer. */
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            /*
             * Read direction, not being retried: save the partially
             * transferred data, prefixed with its length.  NOTE(review):
             * presumably a retried request regenerates its data on the
             * destination, hence nothing is saved — confirm against
             * scsi_req_retry().
             */
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}
161
/*
 * Migration: deserialize per-request state.  Mirror image of
 * scsi_disk_save_request() — the field order must match exactly.
 */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        /* Re-create the bounce buffer before filling it. */
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* Guard against a corrupted/hostile migration stream. */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
184
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 *
 * @ret < 0 is a negative errno from the block layer; @ret >= 0 is a SCSI
 * status from a passthrough command.  @acct_failed controls whether a
 * reported error is charged to the device's accounting statistics.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Map the errno to a SCSI status + sense pair. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            /* Derive an errno so the rerror=/werror= policy can be applied. */
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* The request already carries sense data; let the class hook
             * refresh it rather than rebuilding from scratch. */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* VM is stopping; park the request so it can be restarted later. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}
256
257 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
258 {
259 if (r->req.io_canceled) {
260 scsi_req_cancel_complete(&r->req);
261 return true;
262 }
263
264 if (ret < 0) {
265 return scsi_handle_rw_error(r, ret, acct_failed);
266 }
267
268 return false;
269 }
270
/*
 * AIO completion callback for requests with no data to transfer
 * (e.g. the flush issued by scsi_write_do_fua).  Takes the AioContext
 * lock around the req->aiocb update and completion processing, since
 * this may run in an iothread.
 */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    /* Drop the reference taken when the AIO was issued. */
    scsi_req_unref(&r->req);
}
292
293 static bool scsi_is_cmd_fua(SCSICommand *cmd)
294 {
295 switch (cmd->buf[0]) {
296 case READ_10:
297 case READ_12:
298 case READ_16:
299 case WRITE_10:
300 case WRITE_12:
301 case WRITE_16:
302 return (cmd->buf[1] & 8) != 0;
303
304 case VERIFY_10:
305 case VERIFY_12:
306 case VERIFY_16:
307 case WRITE_VERIFY_10:
308 case WRITE_VERIFY_12:
309 case WRITE_VERIFY_16:
310 return true;
311
312 case READ_6:
313 case WRITE_6:
314 default:
315 return false;
316 }
317 }
318
/*
 * Finish a write request, emulating FUA with an explicit flush when
 * required.  Called with a reference held on behalf of the caller; the
 * flush path transfers that reference to the AIO callback, the direct
 * path drops it after completing the request.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}
336
/*
 * Completion of a scatter/gather DMA transfer (no bounce-buffer I/O).
 * A single s/g submission transfers everything at once, so on success
 * the whole remaining range is accounted as done.
 */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* scsi_write_do_fua() takes over the reference; don't unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}
356
/*
 * AIO callback for scatter/gather DMA.  Accounting is settled here,
 * under the AioContext lock that also protects the req->aiocb update.
 */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
375
/*
 * Completion of one bounce-buffer read chunk: advance the cursor by the
 * amount read and push the data to the transport layer, which will call
 * scsi_read_data() again if more is wanted.
 */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}
393
/*
 * AIO callback for a bounce-buffer read.  Settles accounting and clears
 * req->aiocb under the AioContext lock, then defers to the common
 * no-I/O completion path.
 */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
413
/*
 * Actually issue a read to the block device.  Called either directly
 * from scsi_read_data() or from the flush callback when FUA emulation
 * required a preceding flush (@ret carries the flush result in that
 * case).
 */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* Scatter/gather path: DMA straight into guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* Bounce-buffer path: read one chunk, hand it to the transport. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    /* Balance the caller's reference (the AIO path holds its own). */
    scsi_req_unref(&r->req);
}
447
/*
 * Flush-completion trampoline for read-side FUA emulation: account the
 * flush, then issue the actual read.  req->aiocb handling happens under
 * the AioContext lock.
 */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
466
/*
 * Read more data from scsi device into buffer.  Transport-layer entry
 * point, called (possibly repeatedly) while sector_count is nonzero.
 * On the first call of a FUA read, a flush is issued before the read.
 */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Reads must not be issued on a write-direction command. */
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* FUA read: flush pending writes first, then read via the cb. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
507
/*
 * Completion of one bounce-buffer write chunk: advance the cursor and
 * either finish the request (with FUA handling) or ask the transport
 * for the next chunk of data.
 */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        /* scsi_write_do_fua() takes over the reference; don't unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}
532
/*
 * AIO callback for a bounce-buffer write.  Settles accounting and
 * clears req->aiocb under the AioContext lock, then defers to the
 * common no-I/O completion path.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
551
/*
 * Transport-layer entry point for write-direction commands.  The first
 * call requests data from the driver; subsequent calls issue the
 * buffered (or scatter/gather) data to the block device.  VERIFY
 * commands skip the actual write.
 */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY with BYTCHK: accept the data but do not write it. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* Scatter/gather path: DMA straight from guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        /* Bounce-buffer path: write the chunk we were handed. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}
605
606 /* Return a pointer to the data buffer. */
607 static uint8_t *scsi_get_buf(SCSIRequest *req)
608 {
609 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
610
611 return (uint8_t *)r->iov.iov_base;
612 }
613
/*
 * Emulate INQUIRY with EVPD=1: build the requested Vital Product Data
 * page in @outbuf.  Returns the number of bytes written, or -1 when the
 * page is not supported (the caller reports INVALID_FIELD).  A common
 * 4-byte VPD header is emitted first; the page-length byte at offset 3
 * is patched in at the end.
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        /* Serial numbers are truncated to 36 characters. */
        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        /* Cap so the designator fits the 255-byte page with its header. */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0; /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        /* Limits are converted from bytes into units of logical blocks. */
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            /* Never advertise more than the backend can transfer at once. */
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}
768
/*
 * Emulate the INQUIRY command: either dispatch to the VPD-page builder
 * (EVPD=1) or fill @outbuf with standard INQUIRY data.  Returns the
 * number of valid bytes, or -1 for an unsupported request.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        /* PAGE CODE must be zero when EVPD is not set. */
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
               the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}
819
820 static inline bool media_is_dvd(SCSIDiskState *s)
821 {
822 uint64_t nb_sectors;
823 if (s->qdev.type != TYPE_ROM) {
824 return false;
825 }
826 if (!blk_is_available(s->qdev.conf.blk)) {
827 return false;
828 }
829 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
830 return nb_sectors > CD_MAX_SECTORS;
831 }
832
833 static inline bool media_is_cd(SCSIDiskState *s)
834 {
835 uint64_t nb_sectors;
836 if (s->qdev.type != TYPE_ROM) {
837 return false;
838 }
839 if (!blk_is_available(s->qdev.conf.blk)) {
840 return false;
841 }
842 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
843 return nb_sectors <= CD_MAX_SECTORS;
844 }
845
/*
 * Emulate READ DISC INFORMATION for CD/DVD devices.  Only data type 0
 * (standard disc information) is supported; returns the 34 bytes
 * written, or -1 on error (sense is set for invalid types).
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}
878
/*
 * Emulate READ DVD STRUCTURE.  Supports physical format information
 * (0x00), copyright (0x01), manufacturing info (0x04) and the
 * capability list (0xff); returns the response size or -1 on error
 * (with sense set where appropriate).
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response sizes per supported format, including the 4-byte header. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        /* All formats except the capability list require DVD media. */
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
972
973 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
974 {
975 uint8_t event_code, media_status;
976
977 media_status = 0;
978 if (s->tray_open) {
979 media_status = MS_TRAY_OPEN;
980 } else if (blk_is_inserted(s->qdev.conf.blk)) {
981 media_status = MS_MEDIA_PRESENT;
982 }
983
984 /* Event notification descriptor */
985 event_code = MEC_NO_CHANGE;
986 if (media_status != MS_TRAY_OPEN) {
987 if (s->media_event) {
988 event_code = MEC_NEW_MEDIA;
989 s->media_event = false;
990 } else if (s->eject_request) {
991 event_code = MEC_EJECT_REQUESTED;
992 s->eject_request = false;
993 }
994 }
995
996 outbuf[0] = event_code;
997 outbuf[1] = media_status;
998
999 /* These fields are reserved, just clear them. */
1000 outbuf[2] = 0;
1001 outbuf[3] = 0;
1002 return 4;
1003 }
1004
/*
 * Emulate GET EVENT STATUS NOTIFICATION (CD/DVD only).  Only the polled
 * (synchronous) form and the media event class are supported.  Returns
 * the response size, or -1 for unsupported requests.
 */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        /* No supported event class requested: flag "no event available". */
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}
1031
/*
 * Emulate GET CONFIGURATION (CD/DVD only): report the current profile
 * (CD-ROM, DVD-ROM or none) plus the profile-list, core and removable
 * media features.  Always returns a 40-byte response, or -1 for
 * non-CD/DVD devices.
 */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}
1073
1074 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1075 {
1076 if (s->qdev.type != TYPE_ROM) {
1077 return -1;
1078 }
1079 memset(outbuf, 0, 8);
1080 outbuf[5] = 1; /* CD-ROM */
1081 return 8;
1082 }
1083
/*
 * Build a single MODE SENSE page at **p_outbuf.
 *
 * @page:         page code (0..0x3e)
 * @page_control: 0 = current values, 1 = changeable values,
 *                2 = default values (treated like current here)
 *
 * On success, writes the 2-byte page header plus the page body, advances
 * *p_outbuf past the page and returns the total number of bytes emitted.
 * Returns -1 if the page is not valid for this device type (or quirk
 * configuration), leaving *p_outbuf untouched.
 */
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    /* Per-page bitmask of device types (TYPE_DISK/TYPE_ROM) that expose it. */
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
        [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2; /* skip page header; filled in at the end */
    int length;

    assert(page < ARRAY_SIZE(mode_sense_valid));
    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        /* WCE is the one bit we report as changeable (and as set when the
         * backend write cache is enabled). */
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            if (s->qdev.type == TYPE_ROM) {
                /* Automatic Write Reallocation Enabled */
                p[0] = 0x80;
            }
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        /* All-zero audio control page; length only. */
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    case MODE_PAGE_APPLE_VENDOR:
        /* Quirk page expected by old MacOS drivers; only present when the
         * corresponding quirk property is enabled. */
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
            length = 0x1e;
            if (page_control == 1) { /* Changeable Values */
                break;
            }

            memset(p, 0, length);
            strcpy((char *)p + 8, "APPLE COMPUTER, INC ");
            break;
        } else {
            return -1;
        }

    case MODE_PAGE_VENDOR_SPECIFIC:
        if (s->qdev.type == TYPE_DISK && (s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            length = 0x2;
            if (page_control == 1) { /* Changeable Values */
                p[0] = 0xff;
                p[1] = 0xff;
                break;
            }
            p[0] = 0;
            p[1] = 0;
            break;
        } else {
            return -1;
        }

    default:
        return -1;
    }

    assert(length < 256);
    /* Now fill in the 2-byte header we skipped at the top. */
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
1284
/*
 * Emulate MODE SENSE (6) / MODE SENSE (10).
 *
 * Builds the mode parameter header, optional 8-byte block descriptor and
 * the requested page(s) into @outbuf.  Returns the total length, or -1
 * after setting a CHECK CONDITION (unsupported page / page control).
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    /* Decode CDB fields shared by the 6- and 10-byte forms. */
    dbd = (r->req.cmd.buf[1] & 0x8) != 0;       /* Disable Block Descriptors */
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly. */
        }
    } else {
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
            /* Use DBD from the request... */
            dev_specific_param = 0x00;

            /*
             * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
             * which should never return a block descriptor even though DBD is
             * not set, otherwise CDROM detection fails in MacOS
             */
            if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
                page == MODE_PAGE_APPLE_VENDOR) {
                dbd = true;
            }
        } else {
            /*
             * MMC prescribes that CD/DVD drives have no block descriptors,
             * and defines no device-specific parameter.
             */
            dev_specific_param = 0x00;
            dbd = true;
        }
    }

    /* Mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10). */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length. */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        /* Emit one 8-byte short-LBA block descriptor. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        if (nb_sectors > 0xffffff) {
            /* Capacity does not fit in 24 bits: report 0 per SPC. */
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* MODE_PAGE_ALLS: append every page this device supports. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}
1397
1398 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1399 {
1400 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1401 int start_track, format, msf, toclen;
1402 uint64_t nb_sectors;
1403
1404 msf = req->cmd.buf[1] & 2;
1405 format = req->cmd.buf[2] & 0xf;
1406 start_track = req->cmd.buf[6];
1407 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1408 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1409 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1410 switch (format) {
1411 case 0:
1412 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1413 break;
1414 case 1:
1415 /* multi session : only a single session defined */
1416 toclen = 12;
1417 memset(outbuf, 0, 12);
1418 outbuf[1] = 0x0a;
1419 outbuf[2] = 0x01;
1420 outbuf[3] = 0x01;
1421 break;
1422 case 2:
1423 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1424 break;
1425 default:
1426 return -1;
1427 }
1428 return toclen;
1429 }
1430
1431 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1432 {
1433 SCSIRequest *req = &r->req;
1434 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1435 bool start = req->cmd.buf[4] & 1;
1436 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1437 int pwrcnd = req->cmd.buf[4] & 0xf0;
1438
1439 if (pwrcnd) {
1440 /* eject/load only happens for power condition == 0 */
1441 return 0;
1442 }
1443
1444 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1445 if (!start && !s->tray_open && s->tray_locked) {
1446 scsi_check_condition(r,
1447 blk_is_inserted(s->qdev.conf.blk)
1448 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1449 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1450 return -1;
1451 }
1452
1453 if (s->tray_open != !start) {
1454 blk_eject(s->qdev.conf.blk, !start);
1455 s->tray_open = !start;
1456 }
1457 }
1458 return 0;
1459 }
1460
1461 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1462 {
1463 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1464 int buflen = r->iov.iov_len;
1465
1466 if (buflen) {
1467 trace_scsi_disk_emulate_read_data(buflen);
1468 r->iov.iov_len = 0;
1469 r->started = true;
1470 scsi_req_data(&r->req, buflen);
1471 return;
1472 }
1473
1474 /* This also clears the sense buffer for REQUEST SENSE. */
1475 scsi_req_complete(&r->req, GOOD);
1476 }
1477
/*
 * Validate a MODE SELECT page payload (@inbuf, @inlen bytes, header
 * stripped) for @page.  Rebuilds the page's current values and its
 * changeable-bits mask via mode_sense_page() and rejects the request if
 * the guest tries to modify any bit we report as unchangeable.
 * Returns 0 if acceptable, -1 otherwise.
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    /* Current values, as MODE SENSE would report them (page_control = 0). */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* Changeable-bits mask (page_control = 1). */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}
1521
1522 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1523 {
1524 switch (page) {
1525 case MODE_PAGE_CACHING:
1526 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1527 break;
1528
1529 default:
1530 break;
1531 }
1532 }
1533
1534 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1535 {
1536 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1537
1538 while (len > 0) {
1539 int page, subpage, page_len;
1540
1541 /* Parse both possible formats for the mode page headers. */
1542 page = p[0] & 0x3f;
1543 if (p[0] & 0x40) {
1544 if (len < 4) {
1545 goto invalid_param_len;
1546 }
1547 subpage = p[1];
1548 page_len = lduw_be_p(&p[2]);
1549 p += 4;
1550 len -= 4;
1551 } else {
1552 if (len < 2) {
1553 goto invalid_param_len;
1554 }
1555 subpage = 0;
1556 page_len = p[1];
1557 p += 2;
1558 len -= 2;
1559 }
1560
1561 if (subpage) {
1562 goto invalid_param;
1563 }
1564 if (page_len > len) {
1565 if (!(s->quirks & SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED)) {
1566 goto invalid_param_len;
1567 }
1568 trace_scsi_disk_mode_select_page_truncated(page, page_len, len);
1569 }
1570
1571 if (!change) {
1572 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1573 goto invalid_param;
1574 }
1575 } else {
1576 scsi_disk_apply_mode_select(s, page, p);
1577 }
1578
1579 p += page_len;
1580 len -= page_len;
1581 }
1582 return 0;
1583
1584 invalid_param:
1585 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1586 return -1;
1587
1588 invalid_param_len:
1589 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1590 return -1;
1591 }
1592
/*
 * Emulate MODE SELECT (6) / MODE SELECT (10) using the parameter list in
 * @inbuf.  Validates the header and optional block descriptor, allows a
 * limited block-size change, then runs a validate pass and an apply pass
 * over the mode pages so no change is made on error.  Completes or fails
 * the request; a flush is issued first when writeback caching is off.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8); /* 6- vs 10-byte CDB header */
    int bd_len, bs;
    int pass;

    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        if (!(s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            /* We only support PF=1, SP=0. */
            goto invalid_field;
        }
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Block descriptor length lives at different offsets in the two forms. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    /* Allow changing the block size */
    if (bd_len) {
        bs = p[5] << 16 | p[6] << 8 | p[7];

        /*
         * Since the existing code only checks/updates bits 8-15 of the block
         * size, restrict ourselves to the same requirement for now to ensure
         * that a block size set by a block descriptor and then read back by
         * a subsequent SCSI command will be the same
         */
        if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
            s->qdev.blocksize = bs;
            trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
        }
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error! */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            /* Only the validate pass (pass 0) may fail; pass 1 applies
             * exactly what pass 0 already accepted. */
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* Writethrough mode: flush before reporting success.
         * The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1674
1675 /* sector_num and nb_sectors expected to be in qdev blocksize */
1676 static inline bool check_lba_range(SCSIDiskState *s,
1677 uint64_t sector_num, uint32_t nb_sectors)
1678 {
1679 /*
1680 * The first line tests that no overflow happens when computing the last
1681 * sector. The second line tests that the last accessed sector is in
1682 * range.
1683 *
1684 * Careful, the computations should not underflow for nb_sectors == 0,
1685 * and a 0-block read to the first LBA beyond the end of device is
1686 * valid.
1687 */
1688 return (sector_num <= sector_num + nb_sectors &&
1689 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1690 }
1691
/* State threaded through the chain of discard AIO callbacks for UNMAP. */
typedef struct UnmapCBData {
    SCSIDiskReq *r;   /* request being served; a ref is held until done */
    uint8_t *inbuf;   /* next 16-byte UNMAP block descriptor to process */
    int count;        /* number of descriptors still to process */
} UnmapCBData;
1697
1698 static void scsi_unmap_complete(void *opaque, int ret);
1699
/*
 * Process the next UNMAP block descriptor, or finish the request when
 * none remain.  Called both to kick off the chain (ret == 0 from
 * scsi_disk_emulate_unmap) and from scsi_unmap_complete after each
 * discard finishes.  Takes ownership of the ref on r->req and of @data
 * on the completion/error paths.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* Caller must have cleared the previous aiocb before re-entering. */
    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        /* Descriptor layout: 8-byte LBA followed by 4-byte block count. */
        uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
        uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);

        if (!check_lba_range(s, sector_num, nb_sectors)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        /* Chain: scsi_unmap_complete re-enters this function for the
         * next descriptor once the discard finishes. */
        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    /* Drop the ref taken in scsi_disk_emulate_unmap and free the chain
     * state. */
    scsi_req_unref(&r->req);
    g_free(data);
}
1739
/*
 * AIO completion callback for each discard issued by
 * scsi_unmap_complete_noio.  Runs with no lock held, so it takes the
 * BlockBackend's AioContext lock around request-state manipulation
 * (including clearing req->aiocb).
 */
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        /* Error already reported; release our ref and the chain state. */
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        /* Continue with the next descriptor (or complete the request). */
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1760
/*
 * Emulate UNMAP.  Validates the CDB and the parameter list in @inbuf,
 * then starts the descriptor-by-descriptor discard chain driven by
 * scsi_unmap_complete_noio / scsi_unmap_complete.
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1. */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    /* Parameter list sanity: bytes 0-1 hold the UNMAP data length,
     * bytes 2-3 the block descriptor data length (multiple of 16). */
    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];             /* first 16-byte block descriptor */
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed. */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
1811
/* State threaded through the chain of write AIO callbacks for WRITE SAME. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;     /* request being served; a ref is held until done */
    int64_t sector;     /* next sector to write, in BDRV_SECTOR_SIZE units */
    int nb_sectors;     /* sectors remaining, in BDRV_SECTOR_SIZE units */
    QEMUIOVector qiov;  /* single-element qiov wrapping iov below */
    struct iovec iov;   /* pattern buffer, reused for every chunk */
} WriteSameCBData;
1819
/*
 * AIO completion callback for each chunk written by
 * scsi_disk_emulate_write_same.  Issues the next chunk (possibly
 * shorter, for the unaligned tail) or completes the request.  Holds the
 * BlockBackend's AioContext lock for the whole callback; note the lock
 * is released on both the re-submit path and the completion path.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    /* Advance past the chunk just written; clamp the next chunk to what
     * remains. */
    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    /* Drop the ref taken in scsi_disk_emulate_write_same and free the
     * pattern buffer. */
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
1863
/*
 * Emulate WRITE SAME (10/16) with the one-block pattern in @inbuf.
 * An all-zero pattern (or UNMAP=1) takes the efficient
 * blk_aio_pwrite_zeroes path; otherwise the pattern is replicated into a
 * bounce buffer and written in chunks of up to SCSI_WRITE_SAME_MAX via
 * the scsi_write_same_complete chain.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i, l;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    /* UNMAP bit set, or the pattern is all zeroes: use write-zeroes. */
    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    /* General case: replicate the pattern into a bounce buffer and write
     * it out chunk by chunk. */
    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Fill the buffer with repeated copies of the one-block pattern. */
    for (i = 0; i < data->iov.iov_len; i += l) {
        l = MIN(s->qdev.blocksize, data->iov.iov_len - i);
        memcpy(&buf[i], inbuf, l);
    }

    /* Matching unref is in scsi_write_same_complete. */
    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}
1926
/*
 * Transfer-phase callback for emulated write-style commands: first call
 * collects the guest's parameter data into r->iov; the second call (with
 * iov_len already cleared) dispatches on the opcode to process it.  The
 * opcodes handled here must stay in step with the SCSI_XFER_TO_DEV cases
 * accepted by scsi_disk_emulate_command.
 */
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE. */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* status == -1 means no status set yet (request still pending). */
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        /* scsi_disk_emulate_command never queues other opcodes here. */
        abort();
    }
}
1967
/*
 * Entry point for all emulated (non-DMA) SCSI commands.
 *
 * Returns the data-transfer length expected by the command: positive for
 * device-to-host transfers, negative for host-to-device, and 0 when the
 * command completes (or fails) without a data phase.  Responses are
 * staged in r->iov and delivered by scsi_disk_emulate_read_data /
 * scsi_disk_emulate_write_data.
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* Commands in the first group are valid even without a medium;
     * everything else needs the backend to be available. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    /* Lazily allocate the bounce buffer; reused across retries. */
    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        /* Medium presence was already verified above. */
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        /* RESERVE/RELEASE accepted as no-ops; only reject reserved bits. */
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero. */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            /* PMI=0 requires LBA to be zero. */
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector. */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE". */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                /* PMI=0 requires LBA to be zero. */
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector. */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    /* The following opcodes only validate/trace here; their parameter
     * data is processed later in scsi_disk_emulate_write_data. */
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
            req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    case FORMAT_UNIT:
        trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    /* Clamp the staged response to the allocation length. */
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    /* status == -1 means no status was set yet for this request. */
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
2247
/* Execute a SCSI command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk
   writes), and zero if the command does not transfer any data. */
2252
/*
 * send_command callback for the DMA (read/write/verify) request path.
 *
 * Validates the CDB, records the starting sector and sector count in the
 * request, and returns the expected transfer size in bytes: positive for
 * device-to-host transfers (reads), negative for host-to-device transfers
 * (writes and verifies with data-out), and 0 when the command has already
 * been completed, either with GOOD status or with a check condition.
 */
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    /* No medium inserted (e.g. open tray): fail before parsing the CDB. */
    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Convert logical blocks to 512-byte BDRV sectors. */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        /* The dispatch table routed a non-DMA opcode here: a bug. */
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    /* A zero-length transfer completes immediately with GOOD status. */
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
2339
/*
 * Device reset handler: cancel in-flight requests with RESET sense,
 * re-read the backend size, and restore tray and SCSI-version defaults.
 */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;
    AioContext *ctx;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    /* Query the geometry under the backend's AioContext lock. */
    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    aio_context_release(ctx);

    /* BDRV sectors -> logical blocks; max_lba is the last valid LBA. */
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}
2364
2365 static void scsi_disk_resize_cb(void *opaque)
2366 {
2367 SCSIDiskState *s = opaque;
2368
2369 /* SPC lists this sense code as available only for
2370 * direct-access devices.
2371 */
2372 if (s->qdev.type == TYPE_DISK) {
2373 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2374 }
2375 }
2376
2377 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2378 {
2379 SCSIDiskState *s = opaque;
2380
2381 /*
2382 * When a CD gets changed, we have to report an ejected state and
2383 * then a loaded state to guests so that they detect tray
2384 * open/close and media change events. Guests that do not use
2385 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2386 * states rely on this behavior.
2387 *
2388 * media_changed governs the state machine used for unit attention
2389 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2390 */
2391 s->media_changed = load;
2392 s->tray_open = !load;
2393 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2394 s->media_event = true;
2395 s->eject_request = false;
2396 }
2397
2398 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2399 {
2400 SCSIDiskState *s = opaque;
2401
2402 s->eject_request = true;
2403 if (force) {
2404 s->tray_locked = false;
2405 }
2406 }
2407
2408 static bool scsi_cd_is_tray_open(void *opaque)
2409 {
2410 return ((SCSIDiskState *)opaque)->tray_open;
2411 }
2412
2413 static bool scsi_cd_is_medium_locked(void *opaque)
2414 {
2415 return ((SCSIDiskState *)opaque)->tray_locked;
2416 }
2417
/* Block-backend callbacks for removable devices (tray and media events). */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};
2426
/* Block-backend callbacks for fixed devices: only resize notification. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
2430
2431 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2432 {
2433 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2434 if (s->media_changed) {
2435 s->media_changed = false;
2436 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2437 }
2438 }
2439
/*
 * Common realize step shared by all scsi-disk flavours.
 *
 * Validates the backing drive and its configuration, applies read-only and
 * geometry settings, fills in default identification strings for INQUIRY,
 * and registers the block-device callbacks.  Errors go through @errp.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Non-removable devices must have a medium present at realize time. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    /* Reject iothread-attached backends unless the HBA opted in. */
    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    /* CD-ROMs are forced read-only regardless of backend permissions. */
    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    /* Fill in default identification strings used by INQUIRY emulation. */
    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            /* %.20s: the device-id field carries at most 20 characters. */
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    /* SG character devices belong to scsi-generic/scsi-block, not here. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}
2524
/* Undo scsi_realize(): drop the boot-order LCHS entry for this device. */
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
2529
/* Realize callback for scsi-hd: a fixed direct-access disk. */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        /* Hold the backend's AioContext lock while validating its config. */
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    /* ctx is only set (and the lock only taken) when a backend exists. */
    if (ctx) {
        aio_context_release(ctx);
    }
}
2555
/* Realize callback for scsi-cd: a removable read-only MMC device. */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;
    uint32_t blocksize = 2048;      /* MMC default sector size */

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    if (dev->conf.physical_block_size != 0) {
        blocksize = dev->conf.physical_block_size;
    }

    /* Realize under the backend's AioContext lock. */
    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    s->qdev.blocksize = blocksize;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}
2586
2587
/* Request ops for commands whose responses are synthesized by QEMU. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};
2596
/* Request ops for READ/WRITE commands serviced via the block backend. */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
2607
/*
 * Per-opcode request-ops dispatch table.  Opcodes with no entry fall back
 * to scsi_disk_emulate_reqops in scsi_new_request().
 */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    /* Data-transfer opcodes go through the block backend's AIO path. */
    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};
2648
/* Format the CDB as " 0xNN ..." and emit it through the trace point. */
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int len = scsi_cdb_length(buf);
    char *dump;
    char *cursor;
    int i;

    assert(len > 0 && len <= 16);
    /* Each byte renders as " 0xNN" (5 chars), plus one byte for the NUL. */
    dump = g_malloc(len * 5 + 1);

    cursor = dump;
    for (i = 0; i < len; i++) {
        cursor += sprintf(cursor, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, dump);

    g_free(dump);
}
2665
2666 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2667 uint8_t *buf, void *hba_private)
2668 {
2669 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2670 SCSIRequest *req;
2671 const SCSIReqOps *ops;
2672 uint8_t command;
2673
2674 command = buf[0];
2675 ops = scsi_disk_reqops_dispatch[command];
2676 if (!ops) {
2677 ops = &scsi_disk_emulate_reqops;
2678 }
2679 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2680
2681 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2682 scsi_disk_new_request_dump(lun, tag, buf);
2683 }
2684
2685 return req;
2686 }
2687
2688 #ifdef __linux__
2689 static int get_device_type(SCSIDiskState *s)
2690 {
2691 uint8_t cmd[16];
2692 uint8_t buf[36];
2693 int ret;
2694
2695 memset(cmd, 0, sizeof(cmd));
2696 memset(buf, 0, sizeof(buf));
2697 cmd[0] = INQUIRY;
2698 cmd[4] = sizeof(buf);
2699
2700 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2701 buf, sizeof(buf), s->qdev.io_timeout);
2702 if (ret < 0) {
2703 return -1;
2704 }
2705 s->qdev.type = buf[0];
2706 if (buf[1] & 0x80) {
2707 s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2708 }
2709 return 0;
2710 }
2711
/*
 * Realize callback for scsi-block: validate that the backend is a real
 * SG_IO-capable SCSI device, probe its type via INQUIRY, and finish with
 * the common scsi_realize() path.
 */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    /* All backend probing below runs under the AioContext lock. */
    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}
2775
/* Per-request state for the scsi-block DMA (SG_IO pass-through) path. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];

    /* Original completion callback, invoked by scsi_block_sgio_complete(). */
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;
2788
/*
 * Completion callback for SG_IO requests issued by scsi_block_do_sgio().
 *
 * Inspects the sg_io_hdr status fields and either fails the request
 * (transport error), applies the rerror/werror policy (SCSI error), or
 * forwards the result to the original completion callback.
 */
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        /* Host adapter (transport) errors fail the request outright. */
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        /* Map a driver timeout to BUSY; otherwise use the SCSI status. */
        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            /* The rerror/werror policy runs under the AioContext lock. */
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                /* Request was completed (or will be retried): stop here. */
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}
2825
/*
 * Issue one segment of a guest read/write/verify as an SG_IO request.
 *
 * Rebuilds a CDB from the saved opcode/flag bytes with the LBA and length
 * for this segment patched in, choosing the smallest CDB size that can
 * encode them without shrinking below what the guest used.  Returns the
 * in-flight AIOCB; completion goes through scsi_block_sgio_complete().
 */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}
2912
/* need_fua_emulation hook for scsi-block: never emulate FUA here. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
2917
2918 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2919 QEMUIOVector *iov,
2920 BlockCompletionFunc *cb, void *cb_opaque,
2921 void *opaque)
2922 {
2923 SCSIBlockReq *r = opaque;
2924 return scsi_block_do_sgio(r, offset, iov,
2925 SG_DXFER_FROM_DEV, cb, cb_opaque);
2926 }
2927
2928 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2929 QEMUIOVector *iov,
2930 BlockCompletionFunc *cb, void *cb_opaque,
2931 void *opaque)
2932 {
2933 SCSIBlockReq *r = opaque;
2934 return scsi_block_do_sgio(r, offset, iov,
2935 SG_DXFER_TO_DEV, cb, cb_opaque);
2936 }
2937
2938 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2939 {
2940 switch (buf[0]) {
2941 case VERIFY_10:
2942 case VERIFY_12:
2943 case VERIFY_16:
2944 /* Check if BYTCHK == 0x01 (data-out buffer contains data
2945 * for the number of logical blocks specified in the length
2946 * field). For other modes, do not use scatter/gather operation.
2947 */
2948 if ((buf[1] & 6) == 2) {
2949 return false;
2950 }
2951 break;
2952
2953 case READ_6:
2954 case READ_10:
2955 case READ_12:
2956 case READ_16:
2957 case WRITE_6:
2958 case WRITE_10:
2959 case WRITE_12:
2960 case WRITE_16:
2961 case WRITE_VERIFY_10:
2962 case WRITE_VERIFY_12:
2963 case WRITE_VERIFY_16:
2964 /* MMC writing cannot be done via DMA helpers, because it sometimes
2965 * involves writing beyond the maximum LBA or to negative LBA (lead-in).
2966 * We might use scsi_block_dma_reqops as long as no writing commands are
2967 * seen, but performance usually isn't paramount on optical media. So,
2968 * just make scsi-block operate the same as scsi-generic for them.
2969 */
2970 if (s->qdev.type != TYPE_ROM) {
2971 return false;
2972 }
2973 break;
2974
2975 default:
2976 break;
2977 }
2978
2979 return true;
2980 }
2981
2982
2983 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2984 {
2985 SCSIBlockReq *r = (SCSIBlockReq *)req;
2986 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2987
2988 r->cmd = req->cmd.buf[0];
2989 switch (r->cmd >> 5) {
2990 case 0:
2991 /* 6-byte CDB. */
2992 r->cdb1 = r->group_number = 0;
2993 break;
2994 case 1:
2995 /* 10-byte CDB. */
2996 r->cdb1 = req->cmd.buf[1];
2997 r->group_number = req->cmd.buf[6];
2998 break;
2999 case 4:
3000 /* 12-byte CDB. */
3001 r->cdb1 = req->cmd.buf[1];
3002 r->group_number = req->cmd.buf[10];
3003 break;
3004 case 5:
3005 /* 16-byte CDB. */
3006 r->cdb1 = req->cmd.buf[1];
3007 r->group_number = req->cmd.buf[14];
3008 break;
3009 default:
3010 abort();
3011 }
3012
3013 /* Protection information is not supported. For SCSI versions 2 and
3014 * older (as determined by snooping the guest's INQUIRY commands),
3015 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
3016 */
3017 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
3018 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
3019 return 0;
3020 }
3021
3022 return scsi_disk_dma_command(req, buf);
3023 }
3024
/* Request ops for scsi-block commands using the scatter/gather DMA path. */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
3035
3036 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
3037 uint32_t lun, uint8_t *buf,
3038 void *hba_private)
3039 {
3040 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3041
3042 if (scsi_block_is_passthrough(s, buf)) {
3043 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
3044 hba_private);
3045 } else {
3046 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
3047 hba_private);
3048 }
3049 }
3050
3051 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
3052 uint8_t *buf, size_t buf_len,
3053 void *hba_private)
3054 {
3055 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3056
3057 if (scsi_block_is_passthrough(s, buf)) {
3058 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
3059 } else {
3060 return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
3061 }
3062 }
3063
3064 static void scsi_block_update_sense(SCSIRequest *req)
3065 {
3066 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
3067 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
3068 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
3069 }
3070 #endif
3071
3072 static
3073 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
3074 BlockCompletionFunc *cb, void *cb_opaque,
3075 void *opaque)
3076 {
3077 SCSIDiskReq *r = opaque;
3078 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3079 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3080 }
3081
3082 static
3083 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
3084 BlockCompletionFunc *cb, void *cb_opaque,
3085 void *opaque)
3086 {
3087 SCSIDiskReq *r = opaque;
3088 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3089 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3090 }
3091
/* Class init for the abstract scsi-disk-base type: common reset and I/O ops. */
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    /* Default DMA path goes straight to the block backend. */
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}
3103
/* Abstract base type shared by scsi-hd, scsi-cd and scsi-block. */
static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};
3112
/* Properties shared by the scsi-hd and scsi-cd device models. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),          \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),           \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3123
/* qdev properties for the scsi-hd device model. */
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
3146
/* Migration state: core SCSI device state plus removable-media flags. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};
3161
/* Class init for the scsi-hd device model. */
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->unrealize    = scsi_unrealize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}
3175
/* Concrete scsi-hd type registration. */
static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};
3181
/* qdev properties for the scsi-cd device model. */
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};
3202
3203 static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
3204 {
3205 DeviceClass *dc = DEVICE_CLASS(klass);
3206 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3207
3208 sc->realize = scsi_cd_realize;
3209 sc->alloc_req = scsi_new_request;
3210 sc->unit_attention_reported = scsi_disk_unit_attention_reported;
3211 dc->desc = "virtual SCSI CD-ROM";
3212 device_class_set_props(dc, scsi_cd_properties);
3213 dc->vmsd = &vmstate_scsi_disk_state;
3214 }
3215
3216 static const TypeInfo scsi_cd_info = {
3217 .name = "scsi-cd",
3218 .parent = TYPE_SCSI_DISK_BASE,
3219 .class_init = scsi_cd_class_initfn,
3220 };
3221
3222 #ifdef __linux__
/*
 * qdev properties for the "scsi-block" pass-through device.
 * NOTE(review): unlike scsi-hd/scsi-cd this does not pull in
 * DEFINE_SCSI_DISK_PROPERTIES() but lists drive/error/share-rw
 * individually — presumably because most emulation knobs do not apply
 * when CDBs are forwarded to the host device; confirm.
 */
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    /* Medium rotation rate to report (0 = not reported — TODO confirm). */
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    /* Largest single UNMAP, default 1 GiB (DEFAULT_MAX_UNMAP_SIZE). */
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    /* Largest single transfer, default INT_MAX (2 GB - 1 block). */
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* -1: take the SCSI version from the host device rather than
     * forcing one (presumably — verify against scsi_block_realize). */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    /* Per-request timeout for host pass-through I/O. */
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
3238
3239 static void scsi_block_class_initfn(ObjectClass *klass, void *data)
3240 {
3241 DeviceClass *dc = DEVICE_CLASS(klass);
3242 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
3243 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3244
3245 sc->realize = scsi_block_realize;
3246 sc->alloc_req = scsi_block_new_request;
3247 sc->parse_cdb = scsi_block_parse_cdb;
3248 sdc->dma_readv = scsi_block_dma_readv;
3249 sdc->dma_writev = scsi_block_dma_writev;
3250 sdc->update_sense = scsi_block_update_sense;
3251 sdc->need_fua_emulation = scsi_block_no_fua;
3252 dc->desc = "SCSI block device passthrough";
3253 device_class_set_props(dc, scsi_block_properties);
3254 dc->vmsd = &vmstate_scsi_disk_state;
3255 }
3256
3257 static const TypeInfo scsi_block_info = {
3258 .name = "scsi-block",
3259 .parent = TYPE_SCSI_DISK_BASE,
3260 .class_init = scsi_block_class_initfn,
3261 };
3262 #endif
3263
3264 static void scsi_disk_register_types(void)
3265 {
3266 type_register_static(&scsi_disk_base_info);
3267 type_register_static(&scsi_hd_info);
3268 type_register_static(&scsi_cd_info);
3269 #ifdef __linux__
3270 type_register_static(&scsi_block_info);
3271 #endif
3272 }
3273
3274 type_init(scsi_disk_register_types)