/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

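/* Status register bits that older QEMU versions (ab)used to carry internal
 * retry/error state across migration; see ide_bmdma_pre_save() and
 * ide_bmdma_post_load() below. */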
#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
         IDE_RETRY_READ | IDE_RETRY_FLUSH)

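/*
 * Install @dma_cb as the transfer callback and rewind the PRD bookkeeping.
 * If the guest has already started bus master operation (BM_STATUS_DMAING
 * set), the callback runs immediately; otherwise it is kept in bm->dma_cb
 * and triggered later by bmdma_cmd_writeb() when BM_CMD_START is written.
 */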
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Each PRD is a pair of little-endian dwords: the physical base address
 * of a memory region, then its byte count in bits 1..15 (zero meaning
 * 64 KiB) together with the end-of-table flag in bit 31.
 *
 * Returns the number of bytes prepared.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
}

/*
 * Copy between s->io_buffer and guest memory as described by the PRD
 * table.  Returns 1 once the whole io_buffer has been transferred, or 0
 * if the PRD table ended before the buffer was complete.
 */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

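/*
 * Drop the completion callback.  With @more set, BM_STATUS_DMAING stays
 * set so that a restarted request can carry on; otherwise the channel
 * leaves bus master operation.
 */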
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

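/* Rewind the PRD cursor to the start of the table, so a transfer that is
 * restarted (e.g. after an error or migration) re-reads its PRDs. */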
static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

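/*
 * IRQ handler interposed between the IDE bus and the device's real IRQ
 * line: a rising edge latches BM_STATUS_INT before the level is forwarded
 * to the original line saved in bm->irq by bmdma_init().
 */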
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass the lowered level through unchanged */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

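/*
 * Guest write to the bus master command register.  Only the start/stop
 * bit (BM_CMD_START/SSBM) and the read/write-control bit are kept;
 * clearing SSBM while a transfer is in flight cancels it as described in
 * the comments below.
 */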
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if they keep the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /* First invoke the callbacks of all buffered requests and
             * flag those requests as orphaned.  Ideally there are no
             * unbuffered requests (scatter-gather DMA requests or write
             * requests) pending, so that we can avoid draining. */
            IDEBufferedRequest *req;
            IDEState *s = idebus_active_if(bm->bus);
            QLIST_FOREACH(req, &s->buffered_requests, list) {
                if (!req->orphaned) {
#ifdef DEBUG_IDE
                    printf("%s: invoking cb %p of buffered request %p with"
                           " -ECANCELED\n", __func__, req->original_cb, req);
#endif
                    req->original_cb(req->original_opaque, -ECANCELED);
                }
                req->orphaned = true;
            }
            /*
             * We can't cancel scatter-gather DMA in the middle of the
             * operation, or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had already completed by the time the guest
             * tried to cancel it by writing BM_CMD_START as zero).
             *
             * In the future we will be able to safely cancel the I/O once
             * the whole DMA operation is submitted to disk with a single
             * aio operation using preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
#ifdef DEBUG_IDE
                printf("%s: draining all remaining requests\n", __func__);
#endif
                blk_drain_all();
                assert(bm->bus->dma->aiocb == NULL);
            }
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}

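/*
 * Byte-granular access to the 32-bit PRD table base register; @addr is
 * the byte offset within the register.  Writes clear the two low bits,
 * keeping the table dword-aligned.
 */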
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

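/* Subsection predicate: the PRD cursor is only worth migrating while a
 * transfer is in flight. */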
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection
     * to transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

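/* Copy the bus-level retry state into the migration_* mirror fields so the
 * outgoing stream stays readable by older QEMU versions. */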
static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

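/* Runs once the whole PCIIDEState is in; finishes the per-channel fix-ups
 * that ide_bmdma_post_load() cannot do while the buses are still unloaded. */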
static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions stored
         * bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

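/*
 * Attach up to four drives from @hd_table to the two IDE buses of @dev in
 * the usual order: primary master, primary slave, secondary master,
 * secondary slave.  NULL entries are skipped.
 */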
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

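/*
 * Wire a BMDMA channel to its IDE bus: install the DMA ops, interpose
 * bmdma_irq() on the bus IRQ (saving the original line in bm->irq) and
 * remember the owning PCI device for DMA accesses.
 */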
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)