/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "pci.h"
#include "esp.h"
#include "trace.h"
#include "qemu-log.h"

#define DMA_CMD   0x0
#define DMA_STC   0x1
#define DMA_SPA   0x2
#define DMA_WBC   0x3
#define DMA_WAC   0x4
#define DMA_STAT  0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC  0x7

#define DMA_CMD_MASK   0x03
#define DMA_CMD_DIAG   0x04
#define DMA_CMD_MDL    0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR    0x80

#define DMA_STAT_PWDN    0x01
#define DMA_STAT_ERROR   0x02
#define DMA_STAT_ABORT   0x04
#define DMA_STAT_DONE    0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT  0x20

#define SBAC_STATUS 0x1000

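/*
 * Layout of the 0x80-byte I/O BAR, as decoded by esp_pci_io_read() and
 * esp_pci_io_write() below:
 *   0x00-0x3f  ESP (NCR53C9x) SCSI core registers, one register per 32-bit
 *              slot (offset >> 2 selects the register index)
 *   0x40-0x5f  DMA engine registers (dma_regs[], indexed by the DMA_* values)
 *   0x70       SCSI bus and control register (sbac)
 */
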
typedef struct PCIESPState {
    PCIDevice dev;
    MemoryRegion io;
    uint32_t dma_regs[8];
    uint32_t sbac;
    ESPState esp;
} PCIESPState;

static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_idle(val);
    esp_dma_enable(&pci->esp, 0, 0);
}

static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_blast(val);
    qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
}

static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_abort(val);
    if (pci->esp.current_req) {
        scsi_req_cancel(pci->esp.current_req);
    }
}

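/*
 * START command: latch the programmed starting count, address and MDL
 * address into the working counters, clear the sticky status bits and
 * enable DMA in the ESP core so a pending transfer can proceed.
 */
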
static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_start(val);

    pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
    pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
    pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];

    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR | DMA_STAT_PWDN);

    esp_dma_enable(&pci->esp, 0, 1);
}

static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
{
    trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
    switch (saddr) {
    case DMA_CMD:
        pci->dma_regs[saddr] = val;
        switch (val & DMA_CMD_MASK) {
        case 0x0: /* IDLE */
            esp_pci_handle_idle(pci, val);
            break;
        case 0x1: /* BLAST */
            esp_pci_handle_blast(pci, val);
            break;
        case 0x2: /* ABORT */
            esp_pci_handle_abort(pci, val);
            break;
        case 0x3: /* START */
            esp_pci_handle_start(pci, val);
            break;
        default: /* can't happen */
            abort();
        }
        break;
    case DMA_STC:
    case DMA_SPA:
    case DMA_SMDLA:
        pci->dma_regs[saddr] = val;
        break;
    case DMA_STAT:
        if (!(pci->sbac & SBAC_STATUS)) {
            /* clear some bits on write */
            uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
            pci->dma_regs[DMA_STAT] &= ~(val & mask);
        }
        break;
    default:
        trace_esp_pci_error_invalid_write_dma(val, saddr);
        return;
    }
}

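/*
 * DMA_STAT read-out.  ERROR/ABORT/DONE are cleared either by writing 1s to
 * them (when SBAC_STATUS is clear, handled in esp_pci_dma_write above) or
 * automatically on read (when SBAC_STATUS is set, handled here).  The
 * SCSIINT bit mirrors the interrupt flag of the ESP core.
 */
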
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
    uint32_t val;

    val = pci->dma_regs[saddr];
    if (saddr == DMA_STAT) {
        if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
            val |= DMA_STAT_SCSIINT;
        }
        if (pci->sbac & SBAC_STATUS) {
            pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                         DMA_STAT_DONE);
        }
    }

    trace_esp_pci_dma_read(saddr, val);
    return val;
}

static void esp_pci_io_write(void *opaque, target_phys_addr_t addr,
                             uint64_t val, unsigned int size)
{
    PCIESPState *pci = opaque;

    if (size < 4 || addr & 3) {
        /*
         * Need to upgrade the request: only 4-byte accesses are handled
         * below, so merge the narrow write into the current value of the
         * containing 32-bit register.
         */
        uint32_t current = 0, mask;
        int shift;

        if (addr < 0x40) {
            current = pci->esp.wregs[addr >> 2];
        } else if (addr < 0x60) {
            current = pci->dma_regs[(addr - 0x40) >> 2];
        } else if (addr < 0x74) {
            current = pci->sbac;
        }

        shift = (4 - size) * 8;
        mask = (~(uint32_t)0 << shift) >> shift;

        /*
         * The region is little-endian, so byte (addr & 3) of the register
         * lives at bit (addr & 3) * 8; this mirrors the shift applied on
         * the read side in esp_pci_io_read().
         */
        shift = (addr & 3) * 8;
        val <<= shift;
        val |= current & ~(mask << shift);
        addr &= ~3;
        size = 4;
    }

    if (addr < 0x40) {
        /* SCSI core reg */
        esp_reg_write(&pci->esp, addr >> 2, val);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_write(pci->sbac, val);
        pci->sbac = val;
    } else {
        trace_esp_pci_error_invalid_write((int)addr);
    }
}

static uint64_t esp_pci_io_read(void *opaque, target_phys_addr_t addr,
                                unsigned int size)
{
    PCIESPState *pci = opaque;
    uint32_t ret;

    if (addr < 0x40) {
        /* SCSI core reg */
        ret = esp_reg_read(&pci->esp, addr >> 2);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_read(pci->sbac);
        ret = pci->sbac;
    } else {
        /* Invalid region */
        trace_esp_pci_error_invalid_read((int)addr);
        ret = 0;
    }

    /* give only requested data */
    ret >>= (addr & 3) * 8;
    ret &= ~(~(uint64_t)0 << (8 * size));

    return ret;
}

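/*
 * Copy between the SCSI layer buffer and guest memory using the DMA engine
 * bookkeeping: the transfer is clamped to the remaining byte count (WBC),
 * and the working address (WAC) and remaining count are updated afterwards.
 * The direction requested by the ESP core must match the DIR bit programmed
 * in DMA_CMD, otherwise the transfer is dropped.
 */
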
static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
                                  DMADirection dir)
{
    dma_addr_t addr;
    DMADirection expected_dir;

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
        expected_dir = DMA_DIRECTION_FROM_DEVICE;
    } else {
        expected_dir = DMA_DIRECTION_TO_DEVICE;
    }

    if (dir != expected_dir) {
        trace_esp_pci_error_invalid_dma_direction();
        return;
    }

    /* MDL (memory descriptor list) mode is selected by the command register */
    if (pci->dma_regs[DMA_CMD] & DMA_CMD_MDL) {
        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
    }

    /* the working address counter tracks the current transfer position */
    addr = pci->dma_regs[DMA_WAC];
    if (pci->dma_regs[DMA_WBC] < len) {
        len = pci->dma_regs[DMA_WBC];
    }

    pci_dma_rw(&pci->dev, addr, buf, len, dir);

    /* update status registers */
    pci->dma_regs[DMA_WBC] -= len;
    pci->dma_regs[DMA_WAC] += len;
}

static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
}

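/*
 * The callbacks accept 1- to 4-byte accesses (impl below); sub-word writes
 * are merged into the containing 32-bit register by esp_pci_io_write() and
 * reads extract only the requested bytes in esp_pci_io_read().
 */
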
static const MemoryRegionOps esp_pci_io_ops = {
    .read = esp_pci_io_read,
    .write = esp_pci_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void esp_pci_hard_reset(DeviceState *dev)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev.qdev, dev);
    esp_hard_reset(&pci->esp);
    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
    pci->dma_regs[DMA_WBC] &= ~0xffff;
    pci->dma_regs[DMA_WAC] = 0xffffffff;
    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR);
    pci->dma_regs[DMA_WMAC] = 0xfffffffd;
}

static const VMStateDescription vmstate_esp_pci_scsi = {
    .name = "pciespscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(dev, PCIESPState),
        VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
        VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

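/*
 * Called by the SCSI layer when a request finishes: forward the completion
 * to the ESP core, then mark the DMA transfer as finished (remaining byte
 * count cleared, DONE bit set) so the guest driver sees the engine go idle.
 */
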
static void esp_pci_command_complete(SCSIRequest *req, uint32_t status,
                                     size_t resid)
{
    ESPState *s = req->hba_private;
    PCIESPState *pci = container_of(s, PCIESPState, esp);

    esp_command_complete(req, status, resid);
    pci->dma_regs[DMA_WBC] = 0;
    pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}

static const struct SCSIBusInfo esp_pci_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_pci_command_complete,
    .cancel = esp_request_cancelled,
};

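/*
 * Device init: wire the ESP core's DMA callbacks to the am53c974 DMA engine,
 * expose the register file as a 0x80-byte I/O BAR, route the core interrupt
 * to INTA and attach a SCSI bus (legacy if=scsi drives from the command line
 * are picked up when the adapter is not hotplugged).
 */
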
static int esp_pci_scsi_init(PCIDevice *dev)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev, dev);
    ESPState *s = &pci->esp;
    uint8_t *pci_conf;

    pci_conf = pci->dev.config;

    /* Interrupt pin A */
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    s->dma_memory_read = esp_pci_dma_memory_read;
    s->dma_memory_write = esp_pci_dma_memory_write;
    s->dma_opaque = pci;
    s->chip_id = TCHI_AM53C974;
    memory_region_init_io(&pci->io, &esp_pci_io_ops, pci, "esp-io", 0x80);

    pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
    s->irq = pci->dev.irq[0];

    scsi_bus_new(&s->bus, &dev->qdev, &esp_pci_scsi_info);
    if (!dev->qdev.hotplugged) {
        return scsi_bus_legacy_handle_cmdline(&s->bus);
    }
    return 0;
}

static void esp_pci_scsi_uninit(PCIDevice *d)
{
    PCIESPState *pci = DO_UPCAST(PCIESPState, dev, d);

    memory_region_destroy(&pci->io);
}

static void esp_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = esp_pci_scsi_init;
    k->exit = esp_pci_scsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_AMD;
    k->device_id = PCI_DEVICE_ID_AMD_SCSI;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
    dc->reset = esp_pci_hard_reset;
    dc->vmsd = &vmstate_esp_pci_scsi;
}

static const TypeInfo esp_pci_info = {
    .name = "am53c974",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIESPState),
    .class_init = esp_pci_class_init,
};

static void esp_pci_register_types(void)
{
    type_register_static(&esp_pci_info);
}

type_init(esp_pci_register_types)
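
/*
 * Usage sketch, assuming a guest with an am53c974/ESP driver: the adapter
 * registers as QOM type "am53c974", so a SCSI disk could be attached with
 * something like
 *
 *   qemu-system-x86_64 \
 *       -device am53c974,id=scsi0 \
 *       -device scsi-hd,bus=scsi0.0,drive=hd0 \
 *       -drive if=none,id=hd0,file=disk.img,format=raw
 *
 * The bus name ("scsi0.0") and the scsi-hd/-drive options follow the usual
 * QEMU conventions; exact option spelling may differ between QEMU versions.
 */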