]> git.proxmox.com Git - mirror_qemu.git/blame_incremental - hw/scsi/esp.c
esp: add trivial implementation of the ESP_RFLAGS register
[mirror_qemu.git] / hw / scsi / esp.c
... / ...
CommitLineData
1/*
2 * QEMU ESP/NCR53C9x emulation
3 *
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26#include "qemu/osdep.h"
27#include "hw/sysbus.h"
28#include "migration/vmstate.h"
29#include "hw/irq.h"
30#include "hw/scsi/esp.h"
31#include "trace.h"
32#include "qemu/log.h"
33#include "qemu/module.h"
34
35/*
36 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37 * also produced as NCR89C100. See
38 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39 * and
40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41 *
42 * On Macintosh Quadra it is a NCR53C96.
43 */
44
/*
 * Latch STAT_INT in the status register and assert the IRQ line.
 * Idempotent: does nothing if an interrupt is already pending.
 */
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}
53
/*
 * Clear STAT_INT and deassert the IRQ line.
 * Idempotent: does nothing if no interrupt is pending.
 */
static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}
62
/* Assert the DRQ (data request) line used for PDMA handshaking. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68
/* Deassert the DRQ (data request) line used for PDMA handshaking. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74
75void esp_dma_enable(ESPState *s, int irq, int level)
76{
77 if (level) {
78 s->dma_enabled = 1;
79 trace_esp_dma_enable();
80 if (s->dma_cb) {
81 s->dma_cb(s);
82 s->dma_cb = NULL;
83 }
84 } else {
85 trace_esp_dma_disable();
86 s->dma_enabled = 0;
87 }
88}
89
/*
 * SCSI layer callback: a request was cancelled.  Drop our reference
 * to it if it is the request currently being processed.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
100
101static void esp_fifo_push(ESPState *s, uint8_t val)
102{
103 if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
104 trace_esp_error_fifo_overrun();
105 return;
106 }
107
108 fifo8_push(&s->fifo, val);
109}
110
111static uint8_t esp_fifo_pop(ESPState *s)
112{
113 if (fifo8_is_empty(&s->fifo)) {
114 return 0;
115 }
116
117 return fifo8_pop(&s->fifo);
118}
119
120static void esp_cmdfifo_push(ESPState *s, uint8_t val)
121{
122 if (fifo8_num_used(&s->cmdfifo) == ESP_CMDFIFO_SZ) {
123 trace_esp_error_fifo_overrun();
124 return;
125 }
126
127 fifo8_push(&s->cmdfifo, val);
128}
129
130static uint8_t esp_cmdfifo_pop(ESPState *s)
131{
132 if (fifo8_is_empty(&s->cmdfifo)) {
133 return 0;
134 }
135
136 return fifo8_pop(&s->cmdfifo);
137}
138
139static uint32_t esp_get_tc(ESPState *s)
140{
141 uint32_t dmalen;
142
143 dmalen = s->rregs[ESP_TCLO];
144 dmalen |= s->rregs[ESP_TCMID] << 8;
145 dmalen |= s->rregs[ESP_TCHI] << 16;
146
147 return dmalen;
148}
149
150static void esp_set_tc(ESPState *s, uint32_t dmalen)
151{
152 s->rregs[ESP_TCLO] = dmalen;
153 s->rregs[ESP_TCMID] = dmalen >> 8;
154 s->rregs[ESP_TCHI] = dmalen >> 16;
155}
156
157static uint32_t esp_get_stc(ESPState *s)
158{
159 uint32_t dmalen;
160
161 dmalen = s->wregs[ESP_TCLO];
162 dmalen |= s->wregs[ESP_TCMID] << 8;
163 dmalen |= s->wregs[ESP_TCHI] << 16;
164
165 return dmalen;
166}
167
168static uint8_t esp_pdma_read(ESPState *s)
169{
170 uint8_t val;
171
172 if (s->do_cmd) {
173 val = esp_cmdfifo_pop(s);
174 } else {
175 val = esp_fifo_pop(s);
176 }
177
178 return val;
179}
180
181static void esp_pdma_write(ESPState *s, uint8_t val)
182{
183 uint32_t dmalen = esp_get_tc(s);
184
185 if (dmalen == 0) {
186 return;
187 }
188
189 if (s->do_cmd) {
190 esp_cmdfifo_push(s, val);
191 } else {
192 esp_fifo_push(s, val);
193 }
194
195 dmalen--;
196 esp_set_tc(s, dmalen);
197}
198
/*
 * Begin selection of the target named by WBUSID: cancel any request
 * still in flight, reset transfer state and look up the target device.
 *
 * Returns 0 on success (INTR_FC latched, IRQ deliberately deferred) or
 * -1 if no device answers to the target ID (disconnect, INTR_DC, IRQ
 * raised immediately).
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
233
/*
 * Gather up to maxlen CDB bytes into cmdfifo, either via real DMA
 * (dma_memory_read), via PDMA (DRQ raised, bytes arrive later through
 * esp_pdma_write), or from the data FIFO for non-DMA transfers.
 *
 * Returns the number of bytes collected, 0 when none are available yet
 * (or the PDMA path was armed), or -1 (as uint32_t; callers store it in
 * an int32_t) when selection failed.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* PDMA: select now, raise DRQ and wait for the host to push */
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
        if (dmalen >= 3) {
            buf[0] = buf[2] >> 5;
        }
        fifo8_push_all(&s->cmdfifo, buf, dmalen);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
277
/*
 * Hand the CDB accumulated in cmdfifo to the SCSI layer for the LUN
 * encoded in busid, then switch phase according to the expected data
 * direction: DATA IN defers the completion IRQ until the first transfer
 * finishes, DATA OUT raises it immediately.
 */
static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t n, cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t *buf;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
317
/*
 * Consume the IDENTIFY/message byte(s) at the front of cmdfifo, then
 * dispatch the remaining CDB via do_busid_cmd().  Extended messages are
 * skipped wholesale for now.
 */
static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);
    uint32_t n;

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}
333
/*
 * PDMA completion callback for SELECT WITH ATN: once command bytes have
 * arrived in cmdfifo, dispatch the command (1 leading message byte).
 */
static void satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    }
}
342
/*
 * SELECT WITH ATN command: fetch the message byte plus CDB and run it.
 * Deferred via dma_cb while DMA is disabled; cmdlen == 0 means the
 * target exists but no bytes have arrived yet, so park in command phase.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
363
/*
 * PDMA completion callback for SELECT WITHOUT ATN: dispatch the CDB
 * (no leading message byte, hence offset 0 and busid 0).
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    uint32_t len;

    s->do_cmd = 0;
    len = fifo8_num_used(&s->cmdfifo);
    if (len) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    }
}
375
/*
 * SELECT WITHOUT ATN command: the CDB arrives with no message byte.
 * Deferred via dma_cb while DMA is disabled; cmdlen == 0 parks the
 * device in command phase until bytes arrive.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
396
/*
 * PDMA completion callback for SELECT WITH ATN AND STOP: the message
 * byte has arrived; stop after it (do_cmd stays set), report command
 * phase with transfer count zero and interrupt the host.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
410
/*
 * SELECT WITH ATN AND STOP command: transfer exactly one message byte,
 * then stop in message out phase so the host can inspect/extend the
 * message before issuing TRANSFER INFORMATION for the CDB.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
436
/*
 * PDMA completion callback for ICCS: the status + message bytes have
 * been drained by the host, so report status phase and interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
444
/*
 * ICCS (Initiator Command Complete Sequence): deliver the status byte
 * followed by a zero message byte, either by DMA, by PDMA (DRQ raised,
 * IRQ deferred to write_response_pdma_cb), or by leaving both bytes in
 * the FIFO for PIO reads.
 */
static void write_response(ESPState *s)
{
    uint32_t n;

    trace_esp_write_response(s->status);

    fifo8_reset(&s->fifo);
    esp_fifo_push(s, s->status);
    esp_fifo_push(s, 0);

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque,
                                (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
473
/*
 * Signal DMA completion: terminal count reached, bus service interrupt,
 * sequence step and FIFO flags cleared, transfer counter zeroed.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
483
/*
 * PDMA step callback for data transfers: moves one burst between the
 * FIFO and the SCSI layer's async buffer.  The direction comes from the
 * current bus phase (DATA OUT = host to device).  If do_cmd is still
 * set, the bytes were a CDB - dispatch it instead.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
        if (s->async_len == 0) {
            /* SCSI buffer consumed: let the SCSI layer refill/complete */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            if (s->current_req) {
                /* Defer until the scsi layer has completed */
                scsi_req_continue(s->current_req);
                s->data_in_ready = false;
            }
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
542
/*
 * Core DMA engine.  Two modes:
 *  - do_cmd set: gather CDB/message bytes into cmdfifo (DMA read or
 *    PDMA via do_dma_pdma_cb) and dispatch or change phase;
 *  - otherwise: move min(TC, async_len) bytes between the SCSI layer's
 *    async buffer and guest memory (or the FIFO for PDMA), updating TC,
 *    ti_size and completing the DMA when appropriate.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
647
/*
 * SCSI layer callback: the request has finished.  Record its status,
 * switch to status phase, complete any pending DMA and release our
 * reference to the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Completion with bytes still outstanding is unexpected */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
671
/*
 * SCSI layer callback: len bytes are ready in the request's buffer.
 * For the first DATA IN chunk this raises the deferred command
 * completion interrupt; otherwise it continues (or finishes) the DMA.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (dmalen) {
        esp_do_dma(s);
    } else if (s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
        esp_lower_drq(s);
    }
}
715
/*
 * TRANSFER INFORMATION command: run the DMA engine, or for non-DMA
 * accumulated command bytes either dispatch the CDB or transition out
 * of message out phase.  Deferred via dma_cb while DMA is disabled.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = esp_get_tc(s);
    if (s->dma) {
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
    }
}
756
/*
 * Full chip reset: clear all registers, FIFOs and transfer state.
 * CFG1 is preset to 7 (the chip's own bus ID field).
 */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}
771
/* Soft reset: additionally deasserts the IRQ and DRQ lines first. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
778
/* GPIO reset handler: a rising level triggers a soft reset. */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
785
/*
 * Read one ESP register.  Some reads have side effects: FIFO pops a
 * byte (and decrements ti_size), RINTR clears the interrupt state, and
 * TCHI returns the chip ID until it has been written once.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            s->ti_size--;
            s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
833
/*
 * Write one ESP register.  FIFO writes feed either the command
 * accumulator or the data FIFO; CMD writes latch the DMA flag, reload
 * the transfer counter from the start-count registers and dispatch the
 * command itself.  Successful writes are mirrored into wregs at the end.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the counter clears the terminal count status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_cmdfifo_push(s, val);
        } else {
            s->ti_size++;
            esp_fifo_push(s, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A start count of zero means the maximum, 0x10000 */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            /* Only interrupt if not reporting-disabled via CFG1 */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
961
962static bool esp_mem_accepts(void *opaque, hwaddr addr,
963 unsigned size, bool is_write,
964 MemTxAttrs attrs)
965{
966 return (size == 1) || (is_write && size == 4);
967}
968
/*
 * Migration field predicate: true for streams older than version 5
 * (clamped by the version recorded at save time).
 */
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}
976
/*
 * Migration field predicate: true for exactly version 5 streams
 * (clamped by the version recorded at save time).
 */
static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id == 5;
}
984
/* Record the vmstate version being saved so load-side tests can clamp. */
static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
992
/*
 * Convert pre-version-5 migration state (raw ti_buf/cmdbuf arrays and
 * dma_left counter) into the current Fifo8/TC representation.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1018
/*
 * ESP core migration state.  Version 5 moved the transfer/command
 * buffers into Fifo8s; the mig_* fields exist only to carry pre-v5
 * streams, converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* v3 saved 16 cmdbuf bytes; v4 extended to the full buffer */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};
1053
/* MMIO write: scale the address down to a register index and dispatch. */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}
1064
/* MMIO read: scale the address down to a register index and dispatch. */
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}
1075
/* Register bank MMIO ops; access sizes are gated by esp_mem_accepts(). */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1082
/*
 * PDMA region write: feed one or two bytes (big-endian order for
 * 16-bit accesses) into the chip, then invoke the pending PDMA step
 * callback once the transfer counter expires or the FIFO fills.
 *
 * NOTE(review): assumes pdma_cb has been armed before the guest touches
 * this region - a stray access with pdma_cb == NULL would crash; verify
 * against callers.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    dmalen = esp_get_tc(s);
    if (dmalen == 0 || fifo8_is_full(&s->fifo)) {
        s->pdma_cb(s);
    }
}
1106
/*
 * PDMA region read: pop one or two bytes (big-endian order for 16-bit
 * accesses), then invoke the pending PDMA step callback once the FIFO
 * drains.
 *
 * NOTE(review): like the write path, this assumes pdma_cb is non-NULL
 * when the guest accesses the region - verify against callers.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_is_empty(&s->fifo)) {
        s->pdma_cb(s);
    }
    return val;
}
1130
/* PDMA region ops: 1-4 byte guest accesses, implemented as 1-2 bytes. */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1140
/* SCSI bus callbacks wiring the bus layer back into this adapter. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1150
1151static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1152{
1153 SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1154 ESPState *s = ESP(&sysbus->esp);
1155
1156 switch (irq) {
1157 case 0:
1158 parent_esp_reset(s, irq, level);
1159 break;
1160 case 1:
1161 esp_dma_enable(opaque, irq, level);
1162 break;
1163 }
1164}
1165
/*
 * Realize the sysbus wrapper: realize the inner ESP device, export the
 * IRQ/DRQ lines, map the register and PDMA MMIO regions, wire the two
 * GPIO inputs and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* it_shift must have been set by the board before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1192
/* Device-class reset handler: forward to the core hard reset. */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}
1200
/* Instance init: embed the core ESP device as a QOM child. */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1207
/* Migration wrapper: version byte (v2+) plus the embedded ESP state. */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1218
/* Class init for the sysbus wrapper device. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1228
/* QOM type registration record for the sysbus wrapper. */
static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};
1236
/* Instance finalize: release the FIFO storage created in esp_init(). */
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}
1244
/* Instance init: allocate the data and command FIFOs. */
static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
1252
/* Class init for the core ESP device. */
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1261
/* QOM type registration record for the core ESP device. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1270
/* Register both QOM types with the type system at module init. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)