]> git.proxmox.com Git - mirror_qemu.git/blob - hw/dma/xilinx_axidma.c
2c2d567a95c491b3ec16ad2f32ed25948a8ece84
[mirror_qemu.git] / hw / dma / xilinx_axidma.c
1 /*
2 * QEMU model of Xilinx AXI-DMA block.
3 *
4 * Copyright (c) 2011 Edgar E. Iglesias.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "hw/sysbus.h"
27 #include "qapi/error.h"
28 #include "qemu/timer.h"
29 #include "hw/hw.h"
30 #include "hw/irq.h"
31 #include "hw/ptimer.h"
32 #include "qemu/log.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/module.h"
35
36 #include "hw/stream.h"
37
/* Compiled-out debug printout helper. */
#define D(x)

/* QOM type names: the DMA device itself, plus the two stream-slave
 * objects through which the peer pushes data/control back into it. */
#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"

#define XILINX_AXI_DMA(obj) \
     OBJECT_CHECK(XilinxAXIDMA, (obj), TYPE_XILINX_AXI_DMA)

#define XILINX_AXI_DMA_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_DATA_STREAM)

#define XILINX_AXI_DMA_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_CONTROL_STREAM)

/* Per-channel register indices (word offsets into Stream::regs). */
#define R_DMACR (0x00 / 4)      /* Channel control. */
#define R_DMASR (0x04 / 4)      /* Channel status. */
#define R_CURDESC (0x08 / 4)    /* Current descriptor pointer. */
#define R_TAILDESC (0x10 / 4)   /* Tail descriptor pointer; writes kick the channel. */
#define R_MAX (0x30 / 4)        /* Each channel's register bank spans 0x30 bytes. */

/* Size of the app/control payload attached to each frame: 5 LE words. */
#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

typedef struct XilinxAXIDMA XilinxAXIDMA;
typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;

/* DMACR bits. */
enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

/* DMASR bits.  The IRQ bits share positions with their enable bits
 * in DMACR (see stream_update_irq()). */
enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,    /* Interrupt on complete. */
    DMASR_DLY_IRQ = 1 << 13,    /* Delay interrupt. */

    DMASR_IRQ_MASK = 7 << 12
};

/*
 * Scatter-gather descriptor as laid out in guest memory (little-endian;
 * converted to host order by stream_desc_load()).
 */
struct SDesc {
    uint64_t nxtdesc;           /* address of the next descriptor */
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint8_t app[CONTROL_PAYLOAD_SIZE];  /* application/control words */
};

/* SDesc::control bits. */
enum {
    SDESC_CTRL_EOF = (1 << 26),         /* End of frame. */
    SDESC_CTRL_SOF = (1 << 27),         /* Start of frame. */

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1 /* Buffer length in bytes. */
};

/* SDesc::status bits. */
enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    /* NOTE(review): 1 << 31 left-shifts into the sign bit of int;
     * strictly UB in C — consider (1u << 31). Left untouched here. */
    SDESC_STATUS_COMPLETE = (1 << 31)
};
104
/*
 * Run-time state for one DMA channel.  streams[0] is the MM2S (memory
 * to stream, TX) channel, streams[1] the S2MM (stream to memory, RX)
 * channel.
 */
struct Stream {
    QEMUBH *bh;                 /* bottom half driving the delay ptimer */
    ptimer_state *ptimer;       /* completion-delay timer */
    qemu_irq irq;

    int nr;                     /* channel number, 0 or 1 */

    struct SDesc desc;          /* cached copy of the current descriptor */
    int pos;                    /* current fill level of txbuf */
    unsigned int complete_cnt;  /* descriptors left before raising IOC IRQ */
    uint32_t regs[R_MAX];
    uint8_t app[20];            /* app words latched from the control stream */
    unsigned char txbuf[16 * 1024];  /* frame assembly buffer for MM2S */
};

/* StreamSlave endpoint handed to the peer so it can push S2MM traffic
 * back into this DMA. */
struct XilinxAXIDMAStreamSlave {
    Object parent;

    struct XilinxAXIDMA *dma;   /* link back to the owning DMA device */
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;                     /* "freqhz" property: ptimer frequency */
    StreamSlave *tx_data_dev;            /* peer receiving MM2S frame data */
    StreamSlave *tx_control_dev;         /* peer receiving MM2S app words */
    XilinxAXIDMAStreamSlave rx_data_dev;    /* endpoint for incoming S2MM data */
    XilinxAXIDMAStreamSlave rx_control_dev; /* endpoint for incoming app words */

    struct Stream streams[2];

    StreamCanPushNotifyFn notify;        /* pending can-push notifier, if any */
    void *notify_opaque;
};
140
141 /*
142 * Helper calls to extract info from descriptors and other trivial
143 * state from regs.
144 */
145 static inline int stream_desc_sof(struct SDesc *d)
146 {
147 return d->control & SDESC_CTRL_SOF;
148 }
149
150 static inline int stream_desc_eof(struct SDesc *d)
151 {
152 return d->control & SDESC_CTRL_EOF;
153 }
154
155 static inline int stream_resetting(struct Stream *s)
156 {
157 return !!(s->regs[R_DMACR] & DMACR_RESET);
158 }
159
160 static inline int stream_running(struct Stream *s)
161 {
162 return s->regs[R_DMACR] & DMACR_RUNSTOP;
163 }
164
165 static inline int stream_idle(struct Stream *s)
166 {
167 return !!(s->regs[R_DMASR] & DMASR_IDLE);
168 }
169
/* Return one channel to its power-on register state. */
static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
}
175
176 /* Map an offset addr into a channel index. */
177 static inline int streamid_from_addr(hwaddr addr)
178 {
179 int sid;
180
181 sid = addr / (0x30);
182 sid &= 1;
183 return sid;
184 }
185
186 static void stream_desc_load(struct Stream *s, hwaddr addr)
187 {
188 struct SDesc *d = &s->desc;
189
190 cpu_physical_memory_read(addr, d, sizeof *d);
191
192 /* Convert from LE into host endianness. */
193 d->buffer_address = le64_to_cpu(d->buffer_address);
194 d->nxtdesc = le64_to_cpu(d->nxtdesc);
195 d->control = le32_to_cpu(d->control);
196 d->status = le32_to_cpu(d->status);
197 }
198
/*
 * Write s->desc back to guest memory at @addr.
 * Note: the byteswap is done in place, so after this call the cached
 * s->desc fields hold little-endian values and must be reloaded
 * (stream_desc_load()) before any further field access.
 */
static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    /* Convert from host endianness into LE. */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    cpu_physical_memory_write(addr, d, sizeof *d);
}
210
211 static void stream_update_irq(struct Stream *s)
212 {
213 unsigned int pending, mask, irq;
214
215 pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
216 mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
217
218 irq = pending & mask;
219
220 qemu_set_irq(s->irq, !!irq);
221 }
222
223 static void stream_reload_complete_cnt(struct Stream *s)
224 {
225 unsigned int comp_th;
226 comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
227 s->complete_cnt = comp_th;
228 }
229
/* Delay-timer expiry: rearm the completion counter, flag the delay
 * interrupt and re-evaluate the IRQ line. */
static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
238
/*
 * Account one completed frame: start the delay timer if a completion
 * delay is programmed (DMACR bits [31:24]) and raise the IOC interrupt
 * once the completion threshold has been consumed.
 */
static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer. */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    /* NOTE(review): if complete_cnt were already 0 (threshold field
     * programmed as 0) this decrement wraps around; confirm guests
     * always use a threshold >= 1. */
    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq. */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
258
/*
 * MM2S path: walk the descriptor chain starting at R_CURDESC, gathering
 * each buffer into txbuf and pushing a complete frame to @tx_data_dev
 * at EOF.  The descriptor's app words are pushed to @tx_control_dev at
 * SOF.  Processing stops when a descriptor already marked COMPLETE is
 * met (channel halts) or when the tail descriptor has been handled
 * (channel goes idle).
 */
static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
                                 StreamSlave *tx_control_dev)
{
    uint32_t prev_d;
    unsigned int txlen;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            /* Reached an already-processed descriptor: halt. */
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            /* New frame: reset assembly position and emit app words. */
            s->pos = 0;
            stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app));
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof s->txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 s->txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            /* Frame complete: push it out and account completion. */
            stream_push(tx_data_dev, s->txbuf, s->pos);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor. */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
311
/*
 * S2MM path: scatter @len bytes from @buf into the descriptor chain
 * starting at R_CURDESC.  The first descriptor written gets SOF; the
 * descriptor receiving the final byte gets EOF plus the app words
 * previously latched from the control stream.
 *
 * Returns the number of bytes consumed; 0 if the channel is not
 * running or is idle.
 */
static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
                                   size_t len)
{
    uint32_t prev_d;
    unsigned int rxlen;
    size_t pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return 0;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            /* Reached an already-processed descriptor: halt. */
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* It fits. */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor. */
        if (!len) {
            /* Final chunk: mark EOF and attach the latched app words. */
            stream_complete(s);
            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }

    return pos;
}
365
366 static void xilinx_axidma_reset(DeviceState *dev)
367 {
368 int i;
369 XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
370
371 for (i = 0; i < 2; i++) {
372 stream_reset(&s->streams[i]);
373 }
374 }
375
376 static size_t
377 xilinx_axidma_control_stream_push(StreamSlave *obj, unsigned char *buf,
378 size_t len)
379 {
380 XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
381 struct Stream *s = &cs->dma->streams[1];
382
383 if (len != CONTROL_PAYLOAD_SIZE) {
384 hw_error("AXI DMA requires %d byte control stream payload\n",
385 (int)CONTROL_PAYLOAD_SIZE);
386 }
387
388 memcpy(s->app, buf, len);
389 return len;
390 }
391
392 static bool
393 xilinx_axidma_data_stream_can_push(StreamSlave *obj,
394 StreamCanPushNotifyFn notify,
395 void *notify_opaque)
396 {
397 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
398 struct Stream *s = &ds->dma->streams[1];
399
400 if (!stream_running(s) || stream_idle(s)) {
401 ds->dma->notify = notify;
402 ds->dma->notify_opaque = notify_opaque;
403 return false;
404 }
405
406 return true;
407 }
408
409 static size_t
410 xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len)
411 {
412 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
413 struct Stream *s = &ds->dma->streams[1];
414 size_t ret;
415
416 ret = stream_process_s2mem(s, buf, len);
417 stream_update_irq(s);
418 return ret;
419 }
420
/*
 * MMIO read.  The offset selects the channel (each owns a 0x30-byte
 * bank) and then the register within that bank.
 */
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Simulate one cycles reset delay. */
        s->regs[addr] &= ~DMACR_RESET;
        r = s->regs[addr];
        break;
    case R_DMASR:
        /* The upper half of DMASR reflects live state: completion
         * count in bits [23:16], remaining delay in bits [31:24]. */
        s->regs[addr] &= 0xffff;
        s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
        s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
        r = s->regs[addr];
        break;
    default:
        r = s->regs[addr];
        D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, r));
        break;
    }
    return r;

}
455
/*
 * MMIO write.  Handles channel control (including soft reset), IRQ
 * acknowledge via DMASR, and the tail-descriptor write that kicks the
 * MM2S channel.
 */
static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Tailptr mode is always on. */
        value |= DMACR_TAILPTR_MODE;
        /* Remember our previous reset state. */
        value |= (s->regs[addr] & DMACR_RESET);
        s->regs[addr] = value;

        if (value & DMACR_RESET) {
            stream_reset(s);
        }

        if ((value & 1) && !stream_resetting(s)) {
            /* Start processing. */
            s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
        }
        stream_reload_complete_cnt(s);
        break;

    case R_DMASR:
        /* Mask away write to clear irq lines. */
        value &= ~(value & DMASR_IRQ_MASK);
        s->regs[addr] = value;
        break;

    case R_TAILDESC:
        /* Writing the tail pointer kicks descriptor processing;
         * only channel 0 (MM2S) is driven from here — S2MM runs when
         * the peer pushes data. */
        s->regs[addr] = value;
        s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
        if (!sid) {
            stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
        }
        break;
    default:
        D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, (unsigned)value));
        s->regs[addr] = value;
        break;
    }
    /* A write to channel 1 may have unblocked the S2MM side; fire any
     * pending can-push notification (clear it first so the callback
     * may re-register). */
    if (sid == 1 && d->notify) {
        StreamCanPushNotifyFn notifytmp = d->notify;
        d->notify = NULL;
        notifytmp(d->notify_opaque);
    }
    stream_update_irq(s);
}
513
/* MMIO ops for the two-channel register block mapped by
 * xilinx_axidma_init(). */
static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
519
/*
 * Realize: wire each RX stream endpoint back to this device via a
 * "dma" link property, then set up the per-channel delay timers.
 */
static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;

    object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&ds->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    /* NOTE(review): local_err is not checked between the two add_link
     * calls (nor between the two set_link calls below); an error from
     * the first could be overwritten by the second — confirm intended. */
    object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&cs->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }
    object_property_set_link(OBJECT(ds), OBJECT(s), "dma", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "dma", &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }

    int i;

    /* One completion-delay timer per channel, clocked at "freqhz". */
    for (i = 0; i < 2; i++) {
        struct Stream *st = &s->streams[i];

        st->nr = i;
        st->bh = qemu_bh_new(timer_hit, st);
        st->ptimer = ptimer_init(st->bh, PTIMER_POLICY_DEFAULT);
        ptimer_set_freq(st->ptimer, s->freqhz);
    }
    return;

xilinx_axidma_realize_fail:
    error_propagate(errp, local_err);
}
562
/*
 * Instance init: create the embedded RX stream endpoints as QOM
 * children, export the two channel IRQs, and map the register block.
 */
static void xilinx_axidma_init(Object *obj)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
                      TYPE_XILINX_AXI_DMA_DATA_STREAM);
    object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
                      TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
    object_property_add_child(OBJECT(s), "axistream-connected-target",
                              (Object *)&s->rx_data_dev, &error_abort);
    object_property_add_child(OBJECT(s), "axistream-control-connected-target",
                              (Object *)&s->rx_control_dev, &error_abort);

    /* One IRQ line per channel. */
    sysbus_init_irq(sbd, &s->streams[0].irq);
    sysbus_init_irq(sbd, &s->streams[1].irq);

    /* Two 0x30-byte register banks, one per channel. */
    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(sbd, &s->iomem);
}
584
/* "freqhz" clocks the delay timers; the link properties name the
 * peers that receive MM2S data and control words. */
static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
                     tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
                     tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_END_OF_LIST(),
};
593
594 static void axidma_class_init(ObjectClass *klass, void *data)
595 {
596 DeviceClass *dc = DEVICE_CLASS(klass);
597
598 dc->realize = xilinx_axidma_realize,
599 dc->reset = xilinx_axidma_reset;
600 dc->props = axidma_properties;
601 }
602
/* Class-data templates copied into the stream-slave classes by
 * xilinx_axidma_stream_class_init(). */
static StreamSlaveClass xilinx_axidma_data_stream_class = {
    .push = xilinx_axidma_data_stream_push,
    .can_push = xilinx_axidma_data_stream_can_push,
};

/* The control stream only implements push; can_push stays NULL. */
static StreamSlaveClass xilinx_axidma_control_stream_class = {
    .push = xilinx_axidma_control_stream_push,
};
611
612 static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
613 {
614 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
615
616 ssc->push = ((StreamSlaveClass *)data)->push;
617 ssc->can_push = ((StreamSlaveClass *)data)->can_push;
618 }
619
/* The sysbus DMA device itself. */
static const TypeInfo axidma_info = {
    .name = TYPE_XILINX_AXI_DMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIDMA),
    .class_init = axidma_class_init,
    .instance_init = xilinx_axidma_init,
};

/* S2MM data endpoint; implements the StreamSlave interface. */
static const TypeInfo xilinx_axidma_data_stream_info = {
    .name = TYPE_XILINX_AXI_DMA_DATA_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init = xilinx_axidma_stream_class_init,
    .class_data = &xilinx_axidma_data_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

/* S2MM control (app words) endpoint; implements StreamSlave. */
static const TypeInfo xilinx_axidma_control_stream_info = {
    .name = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init = xilinx_axidma_stream_class_init,
    .class_data = &xilinx_axidma_control_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};
651
/* Register the DMA device and its two stream-slave types with QOM. */
static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
    type_register_static(&xilinx_axidma_data_stream_info);
    type_register_static(&xilinx_axidma_control_stream_info);
}

type_init(xilinx_axidma_register_types)