/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();

        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

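/*
 * Reflect FIFO occupancy on the DRQ line: for DMA requests DRQ is raised
 * while the FIFO can still make progress in the current transfer direction,
 * otherwise it is lowered.
 */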
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;
    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;
    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device ? fifo8_num_free(&s->fifo) < 2
                      : fifo8_num_used(&s->fifo) < 2) {
            esp_lower_drq(s);
        } else {
            esp_raise_drq(s);
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        val = 0;
    } else {
        val = fifo8_pop(&s->fifo);
    }

    esp_update_drq(s);
    return val;
}

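/*
 * Pop up to maxlen bytes from a Fifo8 into dest (which may be NULL to simply
 * discard the data). fifo8_pop_buf() only returns a contiguous chunk, so a
 * second pop is needed when the data wraps around the end of the ring buffer.
 */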
static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

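/* The transfer counter (TC) is a 24-bit value spread across three registers */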
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

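/*
 * Start selection of the target encoded in the bus ID register. Returns 0 on
 * success, or -1 (with a disconnect interrupt pending) if no SCSI device is
 * attached at that target ID.
 */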
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

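/*
 * Deliver the CDB accumulated in cmdfifo to the selected LUN and queue the
 * SCSI request, switching to the DATA IN/OUT phase according to the expected
 * transfer direction.
 */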
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

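/*
 * Return true once cmdfifo contains a complete CDB (after any IDENTIFY
 * message bytes), based on the CDB length implied by its first byte.
 */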
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        esp_lower_drq(s);
    }
}

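/*
 * Advance the current SCSI phase using DMA: data is moved either through the
 * board-supplied dma_memory_read/write callbacks or, when these are absent,
 * through the FIFO (PDMA), decrementing the transfer counter as it goes.
 */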
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);

        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }

        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }

        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

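/*
 * Execute the command most recently written to the ESP_CMD register,
 * reloading the transfer counter from the start transfer count (STC) for
 * DMA commands.
 */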
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

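/*
 * Register reads; note that reading ESP_FIFO pops a byte from the FIFO and
 * reading ESP_RINTR clears the interrupt and status bits as a side effect.
 */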
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name              = TYPE_ESP,
        .parent            = TYPE_DEVICE,
        .instance_init     = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size     = sizeof(ESPState),
        .class_init        = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)