qemu.git: hw/ide/macio.c
/*
 * QEMU IDE Emulation: MacIO support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/ppc/mac_dbdma.h"
#include "block/block.h"
#include "sysemu/dma.h"

#include <hw/ide/internal.h>

/* debug MACIO */
// #define DEBUG_MACIO

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)


/***********************************************************/
/* MacIO based PowerPC IDE */

#define MACIO_PAGE_SIZE 4096

static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int unaligned;

    if (ret < 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_atapi_io_error(s, ret);
        io->remainder_len = 0;
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

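    /*
     * ATAPI progress is tracked in bytes: io_buffer_index >> 11 converts
     * the bytes consumed so far into 2048-byte CD sectors added to s->lba,
     * while & 0x7ff keeps the byte offset within the current sector.
     * (s->lba << 2) + (s->io_buffer_index >> 9) converts that position
     * back into 512-byte block-layer sectors further down.
     */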
    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);

        s->packet_transfer_size -= s->io_buffer_size;

        s->io_buffer_index += s->io_buffer_size;
        s->lba += s->io_buffer_index >> 11;
        s->io_buffer_index &= 0x7ff;
    }

    s->io_buffer_size = MIN(io->len, s->packet_transfer_size);

    MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
                  io->len, s->packet_transfer_size);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);

        MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);

        cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
                                  remainder_len, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        s->io_buffer_size = remainder_len;
        io->remainder_len -= remainder_len;
        /* treat remainder as individual transfer, start again */
        qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                         &address_space_memory);
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    if (!s->packet_transfer_size) {
        MACIO_DPRINTF("end of transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
        cpu_physical_memory_write(io->addr + io->len - unaligned,
                                  io->remainder, unaligned);

        io->len -= unaligned;
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += s->io_buffer_size;
    io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
                            (0x200 - unaligned) & 0x1ff);
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
    if (!io->len) {
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
                  (s->lba << 2) + (s->io_buffer_index >> 9),
                  s->packet_transfer_size, s->dma_cmd);

    m->aiocb = dma_bdrv_read(s->bs, &s->sg,
                             (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
                             pmac_ide_atapi_transfer_cb, io);
    return;

done:
    MACIO_DPRINTF("done DMA\n");
    bdrv_acct_done(s->bs, &s->acct);
    io->dma_end(opaque);
}

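/*
 * DBDMA request lengths are not required to be multiples of 512 bytes, so
 * both transfer callbacks stage the tail of the last sector in io->remainder
 * and hand the leftover io->remainder_len bytes to the guest on the next
 * pass, before setting up a new scatter/gather list for the aligned bulk.
 */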
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int n = 0;
    int64_t sector_num;
    int unaligned;

    if (ret < 0) {
        MACIO_DPRINTF("DMA error\n");
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_dma_error(s);
        io->remainder_len = 0;
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    sector_num = ide_get_sector(s);
    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        n = (s->io_buffer_size + 0x1ff) >> 9;
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                  "sector_num: %" PRId64 "\n",
                  io->remainder_len, io->len, s->nsector, sector_num);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);
        uint8_t *p = &io->remainder[0x200 - remainder_len];

        MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
                      remainder_len, io->addr);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            cpu_physical_memory_write(io->addr, p, remainder_len);
            break;
        case IDE_DMA_WRITE:
            cpu_physical_memory_read(io->addr, p, remainder_len);
            bdrv_write(s->bs, sector_num - 1, io->remainder, 1);
            break;
        case IDE_DMA_TRIM:
            break;
        }
        io->addr += remainder_len;
        io->len -= remainder_len;
        io->remainder_len -= remainder_len;
    }

    if (s->nsector == 0 && !io->remainder_len) {
        MACIO_DPRINTF("end of transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    s->io_buffer_index = 0;
    s->io_buffer_size = MIN(io->len, s->nsector * 512);

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
            cpu_physical_memory_write(io->addr + io->len - unaligned,
                                      io->remainder, unaligned);
            break;
        case IDE_DMA_WRITE:
            /* cache the contents in our io struct */
            cpu_physical_memory_read(io->addr + io->len - unaligned,
                                     io->remainder, unaligned);
            break;
        case IDE_DMA_TRIM:
            break;
        }

        io->len -= unaligned;
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += io->len + unaligned;
    io->remainder_len = (0x200 - unaligned) & 0x1ff;
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
    if (!io->len) {
        pmac_ide_transfer_cb(opaque, 0);
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
                  sector_num, n, s->nsector, s->dma_cmd);

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        m->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
                                 pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        m->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
                                  pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        m->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
                               ide_issue_trim, pmac_ide_transfer_cb, io,
                               DMA_DIRECTION_TO_DEVICE);
        break;
    }
    return;

done:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        bdrv_acct_done(s->bs, &s->acct);
    }
    io->dma_end(io);
}

static void pmac_ide_transfer(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    s->io_buffer_size = 0;
    if (s->drive_kind == IDE_CD) {
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
        break;
    default:
        break;
    }

    pmac_ide_transfer_cb(io, 0);
}

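/*
 * DBDMA flush handler: if a DMA request is still in flight, drain the
 * block layer so the flush does not complete before the outstanding
 * request has finished.
 */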
static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;

    if (m->aiocb) {
        bdrv_drain_all();
    }
}

/* PowerMac IDE memory IO */
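/*
 * The IDE registers appear every 16 bytes within the 4K MacIO aperture,
 * hence the (addr & 0xFFF) >> 4 decoding below.  16- and 32-bit data port
 * accesses are byte-swapped even though the region is declared
 * DEVICE_NATIVE_ENDIAN, presumably to present the little-endian IDE data
 * register in the layout the big-endian PowerPC guest expects.
 */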
static void pmac_ide_writeb (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}

static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
{
    uint8_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        retval = ide_ioport_read(&d->bus, addr);
        break;
    case 8:
    case 22:
        retval = ide_status_read(&d->bus, 0);
        break;
    default:
        retval = 0xFF;
        break;
    }
    return retval;
}

static void pmac_ide_writew (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap16(val);
    if (addr == 0) {
        ide_data_writew(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
{
    uint16_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readw(&d->bus, 0);
    } else {
        retval = 0xFFFF;
    }
    retval = bswap16(retval);
    return retval;
}

static void pmac_ide_writel (void *opaque,
                             hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap32(val);
    if (addr == 0) {
        ide_data_writel(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
{
    uint32_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readl(&d->bus, 0);
    } else {
        retval = 0xFFFFFFFF;
    }
    retval = bswap32(retval);
    return retval;
}

static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField []) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};

static void macio_ide_reset(DeviceState *dev)
{
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
}

static int ide_nop(IDEDMA *dma)
{
    return 0;
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop_restart(void *opaque, int x, RunState y)
{
}

static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *cb)
{
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    MACIO_DPRINTF("\n");
    m->dma_active = true;
    DBDMA_kick(m->dbdma);
}

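/*
 * Once ide_dbdma_start() has kicked the DBDMA channel, the transfer is
 * driven entirely by the DBDMA engine calling back into pmac_ide_transfer()
 * and pmac_ide_flush(), so the remaining IDEDMA callbacks can be no-ops.
 */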
static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .start_transfer = ide_nop,
    .prepare_buf = ide_nop_int,
    .rw_buf = ide_nop_int,
    .set_unit = ide_nop_int,
    .add_status = ide_nop_int,
    .set_inactive = ide_nop,
    .restart_cb = ide_nop_restart,
    .reset = ide_nop,
};

static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}

static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}

static void macio_ide_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
}

static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};

static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}

/* hd_table must contain 2 block drivers */
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
{
    int i;

    for (i = 0; i < 2; i++) {
        if (hd_table[i]) {
            ide_create_drive(&s->bus, i, hd_table[i]);
        }
    }
}

void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}

type_init(macio_ide_register_types)
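
/*
 * Rough usage sketch (illustrative only; the variable names and the exact
 * creation sequence used by the PowerMac machine code are assumptions, not
 * taken from this file):
 *
 *     DeviceState *dev = qdev_create(NULL, TYPE_MACIO_IDE);
 *     MACIOIDEState *ide = MACIO_IDE(dev);
 *
 *     qdev_init_nofail(dev);
 *     macio_ide_register_dma(ide, dbdma, channel);  // hook up a DBDMA channel
 *     macio_ide_init_drives(ide, hd_table);         // attach up to two drives
 */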