/*
 * AHCI SATA controller emulation
 *
 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
25 #include <hw/pci/msi.h>
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/sysbus.h>
30 #include "monitor/monitor.h"
31 #include "sysemu/dma.h"
33 #include <hw/ide/pci.h>
34 #include <hw/ide/ahci.h>
/* #define DEBUG_AHCI */

/* Debug tracing: enabled only when DEBUG_AHCI is defined, otherwise the
 * macro compiles to nothing.  Both definitions are visible in the original;
 * they must be the two arms of a DEBUG_AHCI conditional. */
#ifdef DEBUG_AHCI
#define DPRINTF(port, fmt, ...) \
do { fprintf(stderr, "ahci: %s: [%d] ", __FUNCTION__, port); \
     fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(port, fmt, ...) do {} while(0)
#endif
46 static void check_cmd(AHCIState
*s
, int port
);
47 static int handle_cmd(AHCIState
*s
,int port
,int slot
);
48 static void ahci_reset_port(AHCIState
*s
, int port
);
49 static void ahci_write_fis_d2h(AHCIDevice
*ad
, uint8_t *cmd_fis
);
50 static void ahci_init_d2h(AHCIDevice
*ad
);
52 static uint32_t ahci_port_read(AHCIState
*s
, int port
, int offset
)
56 pr
= &s
->dev
[port
].port_regs
;
62 case PORT_LST_ADDR_HI
:
63 val
= pr
->lst_addr_hi
;
68 case PORT_FIS_ADDR_HI
:
69 val
= pr
->fis_addr_hi
;
81 val
= ((uint16_t)s
->dev
[port
].port
.ifs
[0].error
<< 8) |
82 s
->dev
[port
].port
.ifs
[0].status
;
88 if (s
->dev
[port
].port
.ifs
[0].bs
) {
89 val
= SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP
|
90 SATA_SCR_SSTATUS_SPD_GEN1
| SATA_SCR_SSTATUS_IPM_ACTIVE
;
92 val
= SATA_SCR_SSTATUS_DET_NODEV
;
102 pr
->scr_act
&= ~s
->dev
[port
].finished
;
103 s
->dev
[port
].finished
= 0;
113 DPRINTF(port
, "offset: 0x%x val: 0x%x\n", offset
, val
);
118 static void ahci_irq_raise(AHCIState
*s
, AHCIDevice
*dev
)
120 AHCIPCIState
*d
= container_of(s
, AHCIPCIState
, ahci
);
122 (PCIDevice
*)object_dynamic_cast(OBJECT(d
), TYPE_PCI_DEVICE
);
124 DPRINTF(0, "raise irq\n");
126 if (pci_dev
&& msi_enabled(pci_dev
)) {
127 msi_notify(pci_dev
, 0);
129 qemu_irq_raise(s
->irq
);
133 static void ahci_irq_lower(AHCIState
*s
, AHCIDevice
*dev
)
135 AHCIPCIState
*d
= container_of(s
, AHCIPCIState
, ahci
);
137 (PCIDevice
*)object_dynamic_cast(OBJECT(d
), TYPE_PCI_DEVICE
);
139 DPRINTF(0, "lower irq\n");
141 if (!pci_dev
|| !msi_enabled(pci_dev
)) {
142 qemu_irq_lower(s
->irq
);
146 static void ahci_check_irq(AHCIState
*s
)
150 DPRINTF(-1, "check irq %#x\n", s
->control_regs
.irqstatus
);
152 s
->control_regs
.irqstatus
= 0;
153 for (i
= 0; i
< s
->ports
; i
++) {
154 AHCIPortRegs
*pr
= &s
->dev
[i
].port_regs
;
155 if (pr
->irq_stat
& pr
->irq_mask
) {
156 s
->control_regs
.irqstatus
|= (1 << i
);
160 if (s
->control_regs
.irqstatus
&&
161 (s
->control_regs
.ghc
& HOST_CTL_IRQ_EN
)) {
162 ahci_irq_raise(s
, NULL
);
164 ahci_irq_lower(s
, NULL
);
168 static void ahci_trigger_irq(AHCIState
*s
, AHCIDevice
*d
,
171 DPRINTF(d
->port_no
, "trigger irq %#x -> %x\n",
172 irq_type
, d
->port_regs
.irq_mask
& irq_type
);
174 d
->port_regs
.irq_stat
|= irq_type
;
178 static void map_page(AddressSpace
*as
, uint8_t **ptr
, uint64_t addr
,
184 dma_memory_unmap(as
, *ptr
, len
, DMA_DIRECTION_FROM_DEVICE
, len
);
187 *ptr
= dma_memory_map(as
, addr
, &len
, DMA_DIRECTION_FROM_DEVICE
);
189 dma_memory_unmap(as
, *ptr
, len
, DMA_DIRECTION_FROM_DEVICE
, len
);
194 static void ahci_port_write(AHCIState
*s
, int port
, int offset
, uint32_t val
)
196 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
198 DPRINTF(port
, "offset: 0x%x val: 0x%x\n", offset
, val
);
202 map_page(s
->as
, &s
->dev
[port
].lst
,
203 ((uint64_t)pr
->lst_addr_hi
<< 32) | pr
->lst_addr
, 1024);
204 s
->dev
[port
].cur_cmd
= NULL
;
206 case PORT_LST_ADDR_HI
:
207 pr
->lst_addr_hi
= val
;
208 map_page(s
->as
, &s
->dev
[port
].lst
,
209 ((uint64_t)pr
->lst_addr_hi
<< 32) | pr
->lst_addr
, 1024);
210 s
->dev
[port
].cur_cmd
= NULL
;
214 map_page(s
->as
, &s
->dev
[port
].res_fis
,
215 ((uint64_t)pr
->fis_addr_hi
<< 32) | pr
->fis_addr
, 256);
217 case PORT_FIS_ADDR_HI
:
218 pr
->fis_addr_hi
= val
;
219 map_page(s
->as
, &s
->dev
[port
].res_fis
,
220 ((uint64_t)pr
->fis_addr_hi
<< 32) | pr
->fis_addr
, 256);
223 pr
->irq_stat
&= ~val
;
227 pr
->irq_mask
= val
& 0xfdc000ff;
231 pr
->cmd
= val
& ~(PORT_CMD_LIST_ON
| PORT_CMD_FIS_ON
);
233 if (pr
->cmd
& PORT_CMD_START
) {
234 pr
->cmd
|= PORT_CMD_LIST_ON
;
237 if (pr
->cmd
& PORT_CMD_FIS_RX
) {
238 pr
->cmd
|= PORT_CMD_FIS_ON
;
241 /* XXX usually the FIS would be pending on the bus here and
242 issuing deferred until the OS enables FIS receival.
243 Instead, we only submit it once - which works in most
244 cases, but is a hack. */
245 if ((pr
->cmd
& PORT_CMD_FIS_ON
) &&
246 !s
->dev
[port
].init_d2h_sent
) {
247 ahci_init_d2h(&s
->dev
[port
]);
248 s
->dev
[port
].init_d2h_sent
= true;
254 s
->dev
[port
].port
.ifs
[0].error
= (val
>> 8) & 0xff;
255 s
->dev
[port
].port
.ifs
[0].status
= val
& 0xff;
264 if (((pr
->scr_ctl
& AHCI_SCR_SCTL_DET
) == 1) &&
265 ((val
& AHCI_SCR_SCTL_DET
) == 0)) {
266 ahci_reset_port(s
, port
);
278 pr
->cmd_issue
|= val
;
286 static uint64_t ahci_mem_read(void *opaque
, hwaddr addr
,
289 AHCIState
*s
= opaque
;
292 if (addr
< AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR
) {
295 val
= s
->control_regs
.cap
;
298 val
= s
->control_regs
.ghc
;
301 val
= s
->control_regs
.irqstatus
;
303 case HOST_PORTS_IMPL
:
304 val
= s
->control_regs
.impl
;
307 val
= s
->control_regs
.version
;
311 DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr
, val
);
312 } else if ((addr
>= AHCI_PORT_REGS_START_ADDR
) &&
313 (addr
< (AHCI_PORT_REGS_START_ADDR
+
314 (s
->ports
* AHCI_PORT_ADDR_OFFSET_LEN
)))) {
315 val
= ahci_port_read(s
, (addr
- AHCI_PORT_REGS_START_ADDR
) >> 7,
316 addr
& AHCI_PORT_ADDR_OFFSET_MASK
);
324 static void ahci_mem_write(void *opaque
, hwaddr addr
,
325 uint64_t val
, unsigned size
)
327 AHCIState
*s
= opaque
;
329 /* Only aligned reads are allowed on AHCI */
331 fprintf(stderr
, "ahci: Mis-aligned write to addr 0x"
332 TARGET_FMT_plx
"\n", addr
);
336 if (addr
< AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR
) {
337 DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64
"\n", (unsigned) addr
, val
);
340 case HOST_CAP
: /* R/WO, RO */
341 /* FIXME handle R/WO */
343 case HOST_CTL
: /* R/W */
344 if (val
& HOST_CTL_RESET
) {
345 DPRINTF(-1, "HBA Reset\n");
348 s
->control_regs
.ghc
= (val
& 0x3) | HOST_CTL_AHCI_EN
;
352 case HOST_IRQ_STAT
: /* R/WC, RO */
353 s
->control_regs
.irqstatus
&= ~val
;
356 case HOST_PORTS_IMPL
: /* R/WO, RO */
357 /* FIXME handle R/WO */
359 case HOST_VERSION
: /* RO */
360 /* FIXME report write? */
363 DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr
);
365 } else if ((addr
>= AHCI_PORT_REGS_START_ADDR
) &&
366 (addr
< (AHCI_PORT_REGS_START_ADDR
+
367 (s
->ports
* AHCI_PORT_ADDR_OFFSET_LEN
)))) {
368 ahci_port_write(s
, (addr
- AHCI_PORT_REGS_START_ADDR
) >> 7,
369 addr
& AHCI_PORT_ADDR_OFFSET_MASK
, val
);
374 static const MemoryRegionOps ahci_mem_ops
= {
375 .read
= ahci_mem_read
,
376 .write
= ahci_mem_write
,
377 .endianness
= DEVICE_LITTLE_ENDIAN
,
380 static uint64_t ahci_idp_read(void *opaque
, hwaddr addr
,
383 AHCIState
*s
= opaque
;
385 if (addr
== s
->idp_offset
) {
388 } else if (addr
== s
->idp_offset
+ 4) {
389 /* data register - do memory read at location selected by index */
390 return ahci_mem_read(opaque
, s
->idp_index
, size
);
396 static void ahci_idp_write(void *opaque
, hwaddr addr
,
397 uint64_t val
, unsigned size
)
399 AHCIState
*s
= opaque
;
401 if (addr
== s
->idp_offset
) {
402 /* index register - mask off reserved bits */
403 s
->idp_index
= (uint32_t)val
& ((AHCI_MEM_BAR_SIZE
- 1) & ~3);
404 } else if (addr
== s
->idp_offset
+ 4) {
405 /* data register - do memory write at location selected by index */
406 ahci_mem_write(opaque
, s
->idp_index
, val
, size
);
410 static const MemoryRegionOps ahci_idp_ops
= {
411 .read
= ahci_idp_read
,
412 .write
= ahci_idp_write
,
413 .endianness
= DEVICE_LITTLE_ENDIAN
,
417 static void ahci_reg_init(AHCIState
*s
)
421 s
->control_regs
.cap
= (s
->ports
- 1) |
422 (AHCI_NUM_COMMAND_SLOTS
<< 8) |
423 (AHCI_SUPPORTED_SPEED_GEN1
<< AHCI_SUPPORTED_SPEED
) |
424 HOST_CAP_NCQ
| HOST_CAP_AHCI
;
426 s
->control_regs
.impl
= (1 << s
->ports
) - 1;
428 s
->control_regs
.version
= AHCI_VERSION_1_0
;
430 for (i
= 0; i
< s
->ports
; i
++) {
431 s
->dev
[i
].port_state
= STATE_RUN
;
435 static void check_cmd(AHCIState
*s
, int port
)
437 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
440 if ((pr
->cmd
& PORT_CMD_START
) && pr
->cmd_issue
) {
441 for (slot
= 0; (slot
< 32) && pr
->cmd_issue
; slot
++) {
442 if ((pr
->cmd_issue
& (1U << slot
)) &&
443 !handle_cmd(s
, port
, slot
)) {
444 pr
->cmd_issue
&= ~(1U << slot
);
450 static void ahci_check_cmd_bh(void *opaque
)
452 AHCIDevice
*ad
= opaque
;
454 qemu_bh_delete(ad
->check_bh
);
457 if ((ad
->busy_slot
!= -1) &&
458 !(ad
->port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
))) {
460 ad
->port_regs
.cmd_issue
&= ~(1 << ad
->busy_slot
);
464 check_cmd(ad
->hba
, ad
->port_no
);
467 static void ahci_init_d2h(AHCIDevice
*ad
)
469 uint8_t init_fis
[20];
470 IDEState
*ide_state
= &ad
->port
.ifs
[0];
472 memset(init_fis
, 0, sizeof(init_fis
));
477 if (ide_state
->drive_kind
== IDE_CD
) {
478 init_fis
[5] = ide_state
->lcyl
;
479 init_fis
[6] = ide_state
->hcyl
;
482 ahci_write_fis_d2h(ad
, init_fis
);
485 static void ahci_reset_port(AHCIState
*s
, int port
)
487 AHCIDevice
*d
= &s
->dev
[port
];
488 AHCIPortRegs
*pr
= &d
->port_regs
;
489 IDEState
*ide_state
= &d
->port
.ifs
[0];
492 DPRINTF(port
, "reset port\n");
494 ide_bus_reset(&d
->port
);
495 ide_state
->ncq_queues
= AHCI_MAX_CMDS
;
501 d
->init_d2h_sent
= false;
503 ide_state
= &s
->dev
[port
].port
.ifs
[0];
504 if (!ide_state
->bs
) {
508 /* reset ncq queue */
509 for (i
= 0; i
< AHCI_MAX_CMDS
; i
++) {
510 NCQTransferState
*ncq_tfs
= &s
->dev
[port
].ncq_tfs
[i
];
511 if (!ncq_tfs
->used
) {
515 if (ncq_tfs
->aiocb
) {
516 bdrv_aio_cancel(ncq_tfs
->aiocb
);
517 ncq_tfs
->aiocb
= NULL
;
520 /* Maybe we just finished the request thanks to bdrv_aio_cancel() */
521 if (!ncq_tfs
->used
) {
525 qemu_sglist_destroy(&ncq_tfs
->sglist
);
529 s
->dev
[port
].port_state
= STATE_RUN
;
530 if (!ide_state
->bs
) {
531 s
->dev
[port
].port_regs
.sig
= 0;
532 ide_state
->status
= SEEK_STAT
| WRERR_STAT
;
533 } else if (ide_state
->drive_kind
== IDE_CD
) {
534 s
->dev
[port
].port_regs
.sig
= SATA_SIGNATURE_CDROM
;
535 ide_state
->lcyl
= 0x14;
536 ide_state
->hcyl
= 0xeb;
537 DPRINTF(port
, "set lcyl = %d\n", ide_state
->lcyl
);
538 ide_state
->status
= SEEK_STAT
| WRERR_STAT
| READY_STAT
;
540 s
->dev
[port
].port_regs
.sig
= SATA_SIGNATURE_DISK
;
541 ide_state
->status
= SEEK_STAT
| WRERR_STAT
;
544 ide_state
->error
= 1;
/*
 * Hex-dump a FIS to stderr, 16 bytes per row, for debugging.
 * NOTE(review): upstream wraps the body in #ifdef DEBUG_AHCI; the guard
 * lines were lost in extraction and are reconstructed here — confirm.
 */
static void debug_print_fis(uint8_t *fis, int cmd_len)
{
#ifdef DEBUG_AHCI
    int i;

    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:",i);
        }
        fprintf(stderr, "%02x ",fis[i]);
    }
    fprintf(stderr, "\n");
#endif
}
564 static void ahci_write_fis_sdb(AHCIState
*s
, int port
, uint32_t finished
)
566 AHCIPortRegs
*pr
= &s
->dev
[port
].port_regs
;
570 if (!s
->dev
[port
].res_fis
||
571 !(pr
->cmd
& PORT_CMD_FIS_RX
)) {
575 sdb_fis
= &s
->dev
[port
].res_fis
[RES_FIS_SDBFIS
];
576 ide_state
= &s
->dev
[port
].port
.ifs
[0];
579 *(uint32_t*)sdb_fis
= 0;
582 sdb_fis
[0] = ide_state
->error
;
583 sdb_fis
[2] = ide_state
->status
& 0x77;
584 s
->dev
[port
].finished
|= finished
;
585 *(uint32_t*)(sdb_fis
+ 4) = cpu_to_le32(s
->dev
[port
].finished
);
587 ahci_trigger_irq(s
, &s
->dev
[port
], PORT_IRQ_STAT_SDBS
);
590 static void ahci_write_fis_d2h(AHCIDevice
*ad
, uint8_t *cmd_fis
)
592 AHCIPortRegs
*pr
= &ad
->port_regs
;
595 dma_addr_t cmd_len
= 0x80;
598 if (!ad
->res_fis
|| !(pr
->cmd
& PORT_CMD_FIS_RX
)) {
604 uint64_t tbl_addr
= le64_to_cpu(ad
->cur_cmd
->tbl_addr
);
605 cmd_fis
= dma_memory_map(ad
->hba
->as
, tbl_addr
, &cmd_len
,
606 DMA_DIRECTION_TO_DEVICE
);
610 d2h_fis
= &ad
->res_fis
[RES_FIS_RFIS
];
613 d2h_fis
[1] = (ad
->hba
->control_regs
.irqstatus
? (1 << 6) : 0);
614 d2h_fis
[2] = ad
->port
.ifs
[0].status
;
615 d2h_fis
[3] = ad
->port
.ifs
[0].error
;
617 d2h_fis
[4] = cmd_fis
[4];
618 d2h_fis
[5] = cmd_fis
[5];
619 d2h_fis
[6] = cmd_fis
[6];
620 d2h_fis
[7] = cmd_fis
[7];
621 d2h_fis
[8] = cmd_fis
[8];
622 d2h_fis
[9] = cmd_fis
[9];
623 d2h_fis
[10] = cmd_fis
[10];
624 d2h_fis
[11] = cmd_fis
[11];
625 d2h_fis
[12] = cmd_fis
[12];
626 d2h_fis
[13] = cmd_fis
[13];
627 for (i
= 14; i
< 20; i
++) {
631 if (d2h_fis
[2] & ERR_STAT
) {
632 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_TFES
);
635 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_D2H_REG_FIS
);
638 dma_memory_unmap(ad
->hba
->as
, cmd_fis
, cmd_len
,
639 DMA_DIRECTION_TO_DEVICE
, cmd_len
);
643 static int prdt_tbl_entry_size(const AHCI_SG
*tbl
)
645 return (le32_to_cpu(tbl
->flags_size
) & AHCI_PRDT_SIZE_MASK
) + 1;
648 static int ahci_populate_sglist(AHCIDevice
*ad
, QEMUSGList
*sglist
, int offset
)
650 AHCICmdHdr
*cmd
= ad
->cur_cmd
;
651 uint32_t opts
= le32_to_cpu(cmd
->opts
);
652 uint64_t prdt_addr
= le64_to_cpu(cmd
->tbl_addr
) + 0x80;
653 int sglist_alloc_hint
= opts
>> AHCI_CMD_HDR_PRDT_LEN
;
654 dma_addr_t prdt_len
= (sglist_alloc_hint
* sizeof(AHCI_SG
));
655 dma_addr_t real_prdt_len
= prdt_len
;
663 IDEBus
*bus
= &ad
->port
;
664 BusState
*qbus
= BUS(bus
);
666 if (!sglist_alloc_hint
) {
667 DPRINTF(ad
->port_no
, "no sg list given by guest: 0x%08x\n", opts
);
672 if (!(prdt
= dma_memory_map(ad
->hba
->as
, prdt_addr
, &prdt_len
,
673 DMA_DIRECTION_TO_DEVICE
))){
674 DPRINTF(ad
->port_no
, "map failed\n");
678 if (prdt_len
< real_prdt_len
) {
679 DPRINTF(ad
->port_no
, "mapped less than expected\n");
684 /* Get entries in the PRDT, init a qemu sglist accordingly */
685 if (sglist_alloc_hint
> 0) {
686 AHCI_SG
*tbl
= (AHCI_SG
*)prdt
;
688 for (i
= 0; i
< sglist_alloc_hint
; i
++) {
689 /* flags_size is zero-based */
690 tbl_entry_size
= prdt_tbl_entry_size(&tbl
[i
]);
691 if (offset
<= (sum
+ tbl_entry_size
)) {
693 off_pos
= offset
- sum
;
696 sum
+= tbl_entry_size
;
698 if ((off_idx
== -1) || (off_pos
< 0) || (off_pos
> tbl_entry_size
)) {
699 DPRINTF(ad
->port_no
, "%s: Incorrect offset! "
700 "off_idx: %d, off_pos: %d\n",
701 __func__
, off_idx
, off_pos
);
706 qemu_sglist_init(sglist
, qbus
->parent
, (sglist_alloc_hint
- off_idx
),
708 qemu_sglist_add(sglist
, le64_to_cpu(tbl
[off_idx
].addr
+ off_pos
),
709 prdt_tbl_entry_size(&tbl
[off_idx
]) - off_pos
);
711 for (i
= off_idx
+ 1; i
< sglist_alloc_hint
; i
++) {
712 /* flags_size is zero-based */
713 qemu_sglist_add(sglist
, le64_to_cpu(tbl
[i
].addr
),
714 prdt_tbl_entry_size(&tbl
[i
]));
719 dma_memory_unmap(ad
->hba
->as
, prdt
, prdt_len
,
720 DMA_DIRECTION_TO_DEVICE
, prdt_len
);
724 static void ncq_cb(void *opaque
, int ret
)
726 NCQTransferState
*ncq_tfs
= (NCQTransferState
*)opaque
;
727 IDEState
*ide_state
= &ncq_tfs
->drive
->port
.ifs
[0];
729 /* Clear bit for this tag in SActive */
730 ncq_tfs
->drive
->port_regs
.scr_act
&= ~(1 << ncq_tfs
->tag
);
734 ide_state
->error
= ABRT_ERR
;
735 ide_state
->status
= READY_STAT
| ERR_STAT
;
736 ncq_tfs
->drive
->port_regs
.scr_err
|= (1 << ncq_tfs
->tag
);
738 ide_state
->status
= READY_STAT
| SEEK_STAT
;
741 ahci_write_fis_sdb(ncq_tfs
->drive
->hba
, ncq_tfs
->drive
->port_no
,
742 (1 << ncq_tfs
->tag
));
744 DPRINTF(ncq_tfs
->drive
->port_no
, "NCQ transfer tag %d finished\n",
747 bdrv_acct_done(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
);
748 qemu_sglist_destroy(&ncq_tfs
->sglist
);
752 static void process_ncq_command(AHCIState
*s
, int port
, uint8_t *cmd_fis
,
755 NCQFrame
*ncq_fis
= (NCQFrame
*)cmd_fis
;
756 uint8_t tag
= ncq_fis
->tag
>> 3;
757 NCQTransferState
*ncq_tfs
= &s
->dev
[port
].ncq_tfs
[tag
];
760 /* error - already in use */
761 fprintf(stderr
, "%s: tag %d already used\n", __FUNCTION__
, tag
);
766 ncq_tfs
->drive
= &s
->dev
[port
];
767 ncq_tfs
->slot
= slot
;
768 ncq_tfs
->lba
= ((uint64_t)ncq_fis
->lba5
<< 40) |
769 ((uint64_t)ncq_fis
->lba4
<< 32) |
770 ((uint64_t)ncq_fis
->lba3
<< 24) |
771 ((uint64_t)ncq_fis
->lba2
<< 16) |
772 ((uint64_t)ncq_fis
->lba1
<< 8) |
773 (uint64_t)ncq_fis
->lba0
;
775 /* Note: We calculate the sector count, but don't currently rely on it.
776 * The total size of the DMA buffer tells us the transfer size instead. */
777 ncq_tfs
->sector_count
= ((uint16_t)ncq_fis
->sector_count_high
<< 8) |
778 ncq_fis
->sector_count_low
;
780 DPRINTF(port
, "NCQ transfer LBA from %"PRId64
" to %"PRId64
", "
781 "drive max %"PRId64
"\n",
782 ncq_tfs
->lba
, ncq_tfs
->lba
+ ncq_tfs
->sector_count
- 2,
783 s
->dev
[port
].port
.ifs
[0].nb_sectors
- 1);
785 ahci_populate_sglist(&s
->dev
[port
], &ncq_tfs
->sglist
, 0);
788 switch(ncq_fis
->command
) {
789 case READ_FPDMA_QUEUED
:
790 DPRINTF(port
, "NCQ reading %d sectors from LBA %"PRId64
", "
792 ncq_tfs
->sector_count
-1, ncq_tfs
->lba
, ncq_tfs
->tag
);
794 DPRINTF(port
, "tag %d aio read %"PRId64
"\n",
795 ncq_tfs
->tag
, ncq_tfs
->lba
);
797 dma_acct_start(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
,
798 &ncq_tfs
->sglist
, BDRV_ACCT_READ
);
799 ncq_tfs
->aiocb
= dma_bdrv_read(ncq_tfs
->drive
->port
.ifs
[0].bs
,
800 &ncq_tfs
->sglist
, ncq_tfs
->lba
,
803 case WRITE_FPDMA_QUEUED
:
804 DPRINTF(port
, "NCQ writing %d sectors to LBA %"PRId64
", tag %d\n",
805 ncq_tfs
->sector_count
-1, ncq_tfs
->lba
, ncq_tfs
->tag
);
807 DPRINTF(port
, "tag %d aio write %"PRId64
"\n",
808 ncq_tfs
->tag
, ncq_tfs
->lba
);
810 dma_acct_start(ncq_tfs
->drive
->port
.ifs
[0].bs
, &ncq_tfs
->acct
,
811 &ncq_tfs
->sglist
, BDRV_ACCT_WRITE
);
812 ncq_tfs
->aiocb
= dma_bdrv_write(ncq_tfs
->drive
->port
.ifs
[0].bs
,
813 &ncq_tfs
->sglist
, ncq_tfs
->lba
,
817 DPRINTF(port
, "error: tried to process non-NCQ command as NCQ\n");
818 qemu_sglist_destroy(&ncq_tfs
->sglist
);
823 static int handle_cmd(AHCIState
*s
, int port
, int slot
)
832 if (s
->dev
[port
].port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
)) {
833 /* Engine currently busy, try again later */
834 DPRINTF(port
, "engine busy\n");
838 cmd
= &((AHCICmdHdr
*)s
->dev
[port
].lst
)[slot
];
840 if (!s
->dev
[port
].lst
) {
841 DPRINTF(port
, "error: lst not given but cmd handled");
845 /* remember current slot handle for later */
846 s
->dev
[port
].cur_cmd
= cmd
;
848 opts
= le32_to_cpu(cmd
->opts
);
849 tbl_addr
= le64_to_cpu(cmd
->tbl_addr
);
852 cmd_fis
= dma_memory_map(s
->as
, tbl_addr
, &cmd_len
,
853 DMA_DIRECTION_FROM_DEVICE
);
856 DPRINTF(port
, "error: guest passed us an invalid cmd fis\n");
860 /* The device we are working for */
861 ide_state
= &s
->dev
[port
].port
.ifs
[0];
863 if (!ide_state
->bs
) {
864 DPRINTF(port
, "error: guest accessed unused port");
868 debug_print_fis(cmd_fis
, 0x90);
869 //debug_print_fis(cmd_fis, (opts & AHCI_CMD_HDR_CMD_FIS_LEN) * 4);
871 switch (cmd_fis
[0]) {
872 case SATA_FIS_TYPE_REGISTER_H2D
:
875 DPRINTF(port
, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
876 "cmd_fis[2]=%02x\n", cmd_fis
[0], cmd_fis
[1],
882 switch (cmd_fis
[1]) {
883 case SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER
:
888 DPRINTF(port
, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
889 "cmd_fis[2]=%02x\n", cmd_fis
[0], cmd_fis
[1],
895 switch (s
->dev
[port
].port_state
) {
897 if (cmd_fis
[15] & ATA_SRST
) {
898 s
->dev
[port
].port_state
= STATE_RESET
;
902 if (!(cmd_fis
[15] & ATA_SRST
)) {
903 ahci_reset_port(s
, port
);
908 if (cmd_fis
[1] == SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER
) {
910 /* Check for NCQ command */
911 if ((cmd_fis
[2] == READ_FPDMA_QUEUED
) ||
912 (cmd_fis
[2] == WRITE_FPDMA_QUEUED
)) {
913 process_ncq_command(s
, port
, cmd_fis
, slot
);
917 /* Decompose the FIS */
918 ide_state
->nsector
= (int64_t)((cmd_fis
[13] << 8) | cmd_fis
[12]);
919 ide_state
->feature
= cmd_fis
[3];
920 if (!ide_state
->nsector
) {
921 ide_state
->nsector
= 256;
924 if (ide_state
->drive_kind
!= IDE_CD
) {
926 * We set the sector depending on the sector defined in the FIS.
927 * Unfortunately, the spec isn't exactly obvious on this one.
929 * Apparently LBA48 commands set fis bytes 10,9,8,6,5,4 to the
930 * 48 bit sector number. ATA_CMD_READ_DMA_EXT is an example for
933 * Non-LBA48 commands however use 7[lower 4 bits],6,5,4 to define a
934 * 28-bit sector number. ATA_CMD_READ_DMA is an example for such
937 * Since the spec doesn't explicitly state what each field should
938 * do, I simply assume non-used fields as reserved and OR everything
939 * together, independent of the command.
941 ide_set_sector(ide_state
, ((uint64_t)cmd_fis
[10] << 40)
942 | ((uint64_t)cmd_fis
[9] << 32)
943 /* This is used for LBA48 commands */
944 | ((uint64_t)cmd_fis
[8] << 24)
945 /* This is used for non-LBA48 commands */
946 | ((uint64_t)(cmd_fis
[7] & 0xf) << 24)
947 | ((uint64_t)cmd_fis
[6] << 16)
948 | ((uint64_t)cmd_fis
[5] << 8)
952 /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
953 * table to ide_state->io_buffer
955 if (opts
& AHCI_CMD_ATAPI
) {
956 memcpy(ide_state
->io_buffer
, &cmd_fis
[AHCI_COMMAND_TABLE_ACMD
], 0x10);
957 ide_state
->lcyl
= 0x14;
958 ide_state
->hcyl
= 0xeb;
959 debug_print_fis(ide_state
->io_buffer
, 0x10);
960 ide_state
->feature
= IDE_FEATURE_DMA
;
961 s
->dev
[port
].done_atapi_packet
= false;
962 /* XXX send PIO setup FIS */
965 ide_state
->error
= 0;
967 /* Reset transferred byte counter */
970 /* We're ready to process the command in FIS byte 2. */
971 ide_exec_cmd(&s
->dev
[port
].port
, cmd_fis
[2]);
973 if ((s
->dev
[port
].port
.ifs
[0].status
& (READY_STAT
|DRQ_STAT
|BUSY_STAT
)) ==
975 ahci_write_fis_d2h(&s
->dev
[port
], cmd_fis
);
980 dma_memory_unmap(s
->as
, cmd_fis
, cmd_len
, DMA_DIRECTION_FROM_DEVICE
,
983 if (s
->dev
[port
].port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
)) {
984 /* async command, complete later */
985 s
->dev
[port
].busy_slot
= slot
;
989 /* done handling the command */
993 /* DMA dev <-> ram */
994 static int ahci_start_transfer(IDEDMA
*dma
)
996 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
997 IDEState
*s
= &ad
->port
.ifs
[0];
998 uint32_t size
= (uint32_t)(s
->data_end
- s
->data_ptr
);
999 /* write == ram -> device */
1000 uint32_t opts
= le32_to_cpu(ad
->cur_cmd
->opts
);
1001 int is_write
= opts
& AHCI_CMD_WRITE
;
1002 int is_atapi
= opts
& AHCI_CMD_ATAPI
;
1005 if (is_atapi
&& !ad
->done_atapi_packet
) {
1006 /* already prepopulated iobuffer */
1007 ad
->done_atapi_packet
= true;
1011 if (!ahci_populate_sglist(ad
, &s
->sg
, 0)) {
1015 DPRINTF(ad
->port_no
, "%sing %d bytes on %s w/%s sglist\n",
1016 is_write
? "writ" : "read", size
, is_atapi
? "atapi" : "ata",
1017 has_sglist
? "" : "o");
1019 if (has_sglist
&& size
) {
1021 dma_buf_write(s
->data_ptr
, size
, &s
->sg
);
1023 dma_buf_read(s
->data_ptr
, size
, &s
->sg
);
1027 /* update number of transferred bytes */
1028 ad
->cur_cmd
->status
= cpu_to_le32(le32_to_cpu(ad
->cur_cmd
->status
) + size
);
1031 /* declare that we processed everything */
1032 s
->data_ptr
= s
->data_end
;
1035 qemu_sglist_destroy(&s
->sg
);
1038 s
->end_transfer_func(s
);
1040 if (!(s
->status
& DRQ_STAT
)) {
1042 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_DSS
);
1048 static void ahci_start_dma(IDEDMA
*dma
, IDEState
*s
,
1049 BlockDriverCompletionFunc
*dma_cb
)
1052 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1054 DPRINTF(ad
->port_no
, "\n");
1055 s
->io_buffer_offset
= 0;
1059 static int ahci_dma_prepare_buf(IDEDMA
*dma
, int is_write
)
1061 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1062 IDEState
*s
= &ad
->port
.ifs
[0];
1064 ahci_populate_sglist(ad
, &s
->sg
, 0);
1065 s
->io_buffer_size
= s
->sg
.size
;
1067 DPRINTF(ad
->port_no
, "len=%#x\n", s
->io_buffer_size
);
1068 return s
->io_buffer_size
!= 0;
1071 static int ahci_dma_rw_buf(IDEDMA
*dma
, int is_write
)
1073 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1074 IDEState
*s
= &ad
->port
.ifs
[0];
1075 uint8_t *p
= s
->io_buffer
+ s
->io_buffer_index
;
1076 int l
= s
->io_buffer_size
- s
->io_buffer_index
;
1078 if (ahci_populate_sglist(ad
, &s
->sg
, s
->io_buffer_offset
)) {
1083 dma_buf_read(p
, l
, &s
->sg
);
1085 dma_buf_write(p
, l
, &s
->sg
);
1088 /* free sglist that was created in ahci_populate_sglist() */
1089 qemu_sglist_destroy(&s
->sg
);
1091 /* update number of transferred bytes */
1092 ad
->cur_cmd
->status
= cpu_to_le32(le32_to_cpu(ad
->cur_cmd
->status
) + l
);
1093 s
->io_buffer_index
+= l
;
1094 s
->io_buffer_offset
+= l
;
1096 DPRINTF(ad
->port_no
, "len=%#x\n", l
);
1101 static int ahci_dma_set_unit(IDEDMA
*dma
, int unit
)
1103 /* only a single unit per link */
1107 static int ahci_dma_add_status(IDEDMA
*dma
, int status
)
1109 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1110 DPRINTF(ad
->port_no
, "set status: %x\n", status
);
1112 if (status
& BM_STATUS_INT
) {
1113 ahci_trigger_irq(ad
->hba
, ad
, PORT_IRQ_STAT_DSS
);
1119 static int ahci_dma_set_inactive(IDEDMA
*dma
)
1124 static int ahci_async_cmd_done(IDEDMA
*dma
)
1126 AHCIDevice
*ad
= DO_UPCAST(AHCIDevice
, dma
, dma
);
1128 DPRINTF(ad
->port_no
, "async cmd done\n");
1130 /* update d2h status */
1131 ahci_write_fis_d2h(ad
, NULL
);
1133 if (!ad
->check_bh
) {
1134 /* maybe we still have something to process, check later */
1135 ad
->check_bh
= qemu_bh_new(ahci_check_cmd_bh
, ad
);
1136 qemu_bh_schedule(ad
->check_bh
);
/* Per-port qemu_irq handler — intentionally empty: the controller-level
 * interrupt is managed by ahci_check_irq() instead. */
static void ahci_irq_set(void *opaque, int n, int level)
{
}
1146 static void ahci_dma_restart_cb(void *opaque
, int running
, RunState state
)
1150 static int ahci_dma_reset(IDEDMA
*dma
)
1155 static const IDEDMAOps ahci_dma_ops
= {
1156 .start_dma
= ahci_start_dma
,
1157 .start_transfer
= ahci_start_transfer
,
1158 .prepare_buf
= ahci_dma_prepare_buf
,
1159 .rw_buf
= ahci_dma_rw_buf
,
1160 .set_unit
= ahci_dma_set_unit
,
1161 .add_status
= ahci_dma_add_status
,
1162 .set_inactive
= ahci_dma_set_inactive
,
1163 .async_cmd_done
= ahci_async_cmd_done
,
1164 .restart_cb
= ahci_dma_restart_cb
,
1165 .reset
= ahci_dma_reset
,
1168 void ahci_init(AHCIState
*s
, DeviceState
*qdev
, AddressSpace
*as
, int ports
)
1175 s
->dev
= g_malloc0(sizeof(AHCIDevice
) * ports
);
1177 /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
1178 memory_region_init_io(&s
->mem
, OBJECT(qdev
), &ahci_mem_ops
, s
,
1179 "ahci", AHCI_MEM_BAR_SIZE
);
1180 memory_region_init_io(&s
->idp
, OBJECT(qdev
), &ahci_idp_ops
, s
,
1183 irqs
= qemu_allocate_irqs(ahci_irq_set
, s
, s
->ports
);
1185 for (i
= 0; i
< s
->ports
; i
++) {
1186 AHCIDevice
*ad
= &s
->dev
[i
];
1188 ide_bus_new(&ad
->port
, sizeof(ad
->port
), qdev
, i
, 1);
1189 ide_init2(&ad
->port
, irqs
[i
]);
1193 ad
->port
.dma
= &ad
->dma
;
1194 ad
->port
.dma
->ops
= &ahci_dma_ops
;
1198 void ahci_uninit(AHCIState
*s
)
1200 memory_region_destroy(&s
->mem
);
1201 memory_region_destroy(&s
->idp
);
1205 void ahci_reset(AHCIState
*s
)
1210 s
->control_regs
.irqstatus
= 0;
1212 * The implementation of this bit is dependent upon the value of the
1213 * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
1214 * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
1215 * read-only and shall have a reset value of '1'.
1217 * We set HOST_CAP_AHCI so we must enable AHCI at reset.
1219 s
->control_regs
.ghc
= HOST_CTL_AHCI_EN
;
1221 for (i
= 0; i
< s
->ports
; i
++) {
1222 pr
= &s
->dev
[i
].port_regs
;
1226 pr
->cmd
= PORT_CMD_SPIN_UP
| PORT_CMD_POWER_ON
;
1227 ahci_reset_port(s
, i
);
1231 static const VMStateDescription vmstate_ahci_device
= {
1232 .name
= "ahci port",
1234 .fields
= (VMStateField
[]) {
1235 VMSTATE_IDE_BUS(port
, AHCIDevice
),
1236 VMSTATE_UINT32(port_state
, AHCIDevice
),
1237 VMSTATE_UINT32(finished
, AHCIDevice
),
1238 VMSTATE_UINT32(port_regs
.lst_addr
, AHCIDevice
),
1239 VMSTATE_UINT32(port_regs
.lst_addr_hi
, AHCIDevice
),
1240 VMSTATE_UINT32(port_regs
.fis_addr
, AHCIDevice
),
1241 VMSTATE_UINT32(port_regs
.fis_addr_hi
, AHCIDevice
),
1242 VMSTATE_UINT32(port_regs
.irq_stat
, AHCIDevice
),
1243 VMSTATE_UINT32(port_regs
.irq_mask
, AHCIDevice
),
1244 VMSTATE_UINT32(port_regs
.cmd
, AHCIDevice
),
1245 VMSTATE_UINT32(port_regs
.tfdata
, AHCIDevice
),
1246 VMSTATE_UINT32(port_regs
.sig
, AHCIDevice
),
1247 VMSTATE_UINT32(port_regs
.scr_stat
, AHCIDevice
),
1248 VMSTATE_UINT32(port_regs
.scr_ctl
, AHCIDevice
),
1249 VMSTATE_UINT32(port_regs
.scr_err
, AHCIDevice
),
1250 VMSTATE_UINT32(port_regs
.scr_act
, AHCIDevice
),
1251 VMSTATE_UINT32(port_regs
.cmd_issue
, AHCIDevice
),
1252 VMSTATE_BOOL(done_atapi_packet
, AHCIDevice
),
1253 VMSTATE_INT32(busy_slot
, AHCIDevice
),
1254 VMSTATE_BOOL(init_d2h_sent
, AHCIDevice
),
1255 VMSTATE_END_OF_LIST()
1259 static int ahci_state_post_load(void *opaque
, int version_id
)
1262 struct AHCIDevice
*ad
;
1263 AHCIState
*s
= opaque
;
1265 for (i
= 0; i
< s
->ports
; i
++) {
1267 AHCIPortRegs
*pr
= &ad
->port_regs
;
1269 map_page(s
->as
, &ad
->lst
,
1270 ((uint64_t)pr
->lst_addr_hi
<< 32) | pr
->lst_addr
, 1024);
1271 map_page(s
->as
, &ad
->res_fis
,
1272 ((uint64_t)pr
->fis_addr_hi
<< 32) | pr
->fis_addr
, 256);
1274 * All pending i/o should be flushed out on a migrate. However,
1275 * we might not have cleared the busy_slot since this is done
1276 * in a bh. Also, issue i/o against any slots that are pending.
1278 if ((ad
->busy_slot
!= -1) &&
1279 !(ad
->port
.ifs
[0].status
& (BUSY_STAT
|DRQ_STAT
))) {
1280 pr
->cmd_issue
&= ~(1 << ad
->busy_slot
);
1289 const VMStateDescription vmstate_ahci
= {
1292 .post_load
= ahci_state_post_load
,
1293 .fields
= (VMStateField
[]) {
1294 VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev
, AHCIState
, ports
,
1295 vmstate_ahci_device
, AHCIDevice
),
1296 VMSTATE_UINT32(control_regs
.cap
, AHCIState
),
1297 VMSTATE_UINT32(control_regs
.ghc
, AHCIState
),
1298 VMSTATE_UINT32(control_regs
.irqstatus
, AHCIState
),
1299 VMSTATE_UINT32(control_regs
.impl
, AHCIState
),
1300 VMSTATE_UINT32(control_regs
.version
, AHCIState
),
1301 VMSTATE_UINT32(idp_index
, AHCIState
),
1302 VMSTATE_INT32_EQUAL(ports
, AHCIState
),
1303 VMSTATE_END_OF_LIST()
1307 #define TYPE_SYSBUS_AHCI "sysbus-ahci"
1308 #define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)
1310 typedef struct SysbusAHCIState
{
1312 SysBusDevice parent_obj
;
1319 static const VMStateDescription vmstate_sysbus_ahci
= {
1320 .name
= "sysbus-ahci",
1321 .unmigratable
= 1, /* Still buggy under I/O load */
1322 .fields
= (VMStateField
[]) {
1323 VMSTATE_AHCI(ahci
, SysbusAHCIState
),
1324 VMSTATE_END_OF_LIST()
1328 static void sysbus_ahci_reset(DeviceState
*dev
)
1330 SysbusAHCIState
*s
= SYSBUS_AHCI(dev
);
1332 ahci_reset(&s
->ahci
);
1335 static void sysbus_ahci_realize(DeviceState
*dev
, Error
**errp
)
1337 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1338 SysbusAHCIState
*s
= SYSBUS_AHCI(dev
);
1340 ahci_init(&s
->ahci
, dev
, &address_space_memory
, s
->num_ports
);
1342 sysbus_init_mmio(sbd
, &s
->ahci
.mem
);
1343 sysbus_init_irq(sbd
, &s
->ahci
.irq
);
1346 static Property sysbus_ahci_properties
[] = {
1347 DEFINE_PROP_UINT32("num-ports", SysbusAHCIState
, num_ports
, 1),
1348 DEFINE_PROP_END_OF_LIST(),
1351 static void sysbus_ahci_class_init(ObjectClass
*klass
, void *data
)
1353 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1355 dc
->realize
= sysbus_ahci_realize
;
1356 dc
->vmsd
= &vmstate_sysbus_ahci
;
1357 dc
->props
= sysbus_ahci_properties
;
1358 dc
->reset
= sysbus_ahci_reset
;
1359 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1362 static const TypeInfo sysbus_ahci_info
= {
1363 .name
= TYPE_SYSBUS_AHCI
,
1364 .parent
= TYPE_SYS_BUS_DEVICE
,
1365 .instance_size
= sizeof(SysbusAHCIState
),
1366 .class_init
= sysbus_ahci_class_init
,
1369 static void sysbus_ahci_register_types(void)
1371 type_register_static(&sysbus_ahci_info
);
1374 type_init(sysbus_ahci_register_types
)