/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On the Macintosh Quadra it is an NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

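/*
 * The low 3 bits of the RSTAT register encode the current SCSI bus phase
 * (0 = DATA OUT, 1 = DATA IN, 2 = COMMAND, 3 = STATUS, 6 = MESSAGE OUT,
 * 7 = MESSAGE IN, as listed in esp_phase_names above).
 */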
static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

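/*
 * Update the DRQ line according to the transfer direction implied by the
 * current SCSI phase: during a DMA request, DRQ is raised while the FIFO
 * has room (to device) or data (from device) for at least 2 bytes,
 * presumably so that a 16-bit PDMA access (see sysbus_esp_pdma_ops below)
 * can always complete.
 */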
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;

    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        val = 0;
    } else {
        val = fifo8_pop(&s->fifo);
    }

    esp_update_drq(s);
    return val;
}

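/*
 * Pop up to maxlen bytes from a Fifo8 into dest (dest may be NULL to
 * discard the data). fifo8_pop_buf() returns a pointer into the ring
 * buffer, so a second pop is needed when the data wraps around the end
 * of the internal buffer.
 */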
static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

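/*
 * The 24-bit transfer counter (TC) is spread across the TCLO/TCMID/TCHI
 * registers. The rregs copy is the live count decremented as a transfer
 * progresses; the wregs copy (see esp_get_stc() below) holds the start
 * value programmed by the guest.
 */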
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

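/*
 * Begin target selection: the destination ID comes from the low bits of
 * the write-only bus ID register. Returns -1 (and raises a disconnect
 * interrupt) if no device answers at that target ID.
 */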
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

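/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo. The
 * first byte is the IDENTIFY message, whose low 3 bits select the LUN;
 * any further (extended) message bytes are discarded.
 */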
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

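/*
 * Return true once cmdfifo holds a complete CDB: peek at the first byte
 * after the message-out data and use scsi_cdb_length() to determine the
 * expected CDB length from the opcode (it returns a negative value if
 * the length cannot be determined).
 */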
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }
}

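/*
 * Main DMA transfer loop, driven by the current SCSI phase and the
 * ESP_CMD register. Each step moves up to TC bytes either through the
 * dma_memory_read()/dma_memory_write() callbacks (true DMA) or through
 * the FIFO (PDMA), then decides whether to raise an interrupt, recurse
 * for more data, or defer until the SCSI layer completes.
 */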
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

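/*
 * Non-DMA counterpart of esp_do_dma(): transfers are driven one FIFO
 * register access at a time, with command bytes accumulated in cmdfifo
 * until a complete CDB is available.
 */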
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains the entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

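/*
 * Execute the command written to ESP_CMD. For DMA commands the transfer
 * counter is reloaded from the start value in the write registers; a
 * programmed value of 0 means the maximum transfer length of 0x10000
 * bytes.
 */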
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

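/*
 * PDMA (pseudo-DMA) handlers: instead of the chip mastering the bus, the
 * CPU reads/writes this region a byte or word at a time. Each access
 * moves data through the FIFO and then kicks esp_do_dma() to make
 * progress on the transfer.
 */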
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)