]> git.proxmox.com Git - mirror_qemu.git/blame - hw/dma/etraxfs_dma.c
typedefs: Separate incomplete types and function types
[mirror_qemu.git] / hw / dma / etraxfs_dma.c
CommitLineData
1ba13a5d
EI
1/*
2 * QEMU ETRAX DMA Controller.
3 *
4 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
0430891c 24#include "qemu/osdep.h"
83c9f4ca 25#include "hw/hw.h"
022c62cb 26#include "exec/address-spaces.h"
9c17d615 27#include "sysemu/sysemu.h"
1ba13a5d 28
0d09e41a 29#include "hw/cris/etraxfs_dma.h"
1ba13a5d
EI
30
31#define D(x)
32
c01c07bb
EI
/* Per-channel register map. Byte offsets divided by 4 so the constants
   index directly into the 32-bit regs[] array of a channel. Each channel
   occupies a 0x2000-byte window (see fs_channel()). */
#define RW_DATA (0x0 / 4)
#define RW_SAVED_DATA (0x58 / 4)
#define RW_SAVED_DATA_BUF (0x5c / 4)
#define RW_GROUP (0x60 / 4)
#define RW_GROUP_DOWN (0x7c / 4)
#define RW_CMD (0x80 / 4)
#define RW_CFG (0x84 / 4)
#define RW_STAT (0x88 / 4)
#define RW_INTR_MASK (0x8c / 4)
#define RW_ACK_INTR (0x90 / 4)
#define R_INTR (0x94 / 4)
#define R_MASKED_INTR (0x98 / 4)
#define RW_STREAM_CMD (0x9c / 4)

/* Number of 32-bit register slots backing one channel (0x100 bytes). */
#define DMA_REG_MAX (0x100 / 4)
1ba13a5d
EI
48
49/* descriptors */
50
// ------------------------------------------------------------ dma_descr_group
/* Guest-memory layout of a group descriptor. Read raw with
   cpu_physical_memory_read(), so the bitfield/endianness layout is
   host-dependent (see the FIXMEs at the load/store sites). */
typedef struct dma_descr_group {
    uint32_t next;                        /* guest address of next group descriptor */
    unsigned eol : 1;                     /* end of list */
    unsigned tol : 1;
    unsigned bol : 1;
    unsigned : 1;
    unsigned intr : 1;                    /* raise interrupt when processed */
    unsigned : 2;
    unsigned en : 1;                      /* group enabled */
    unsigned : 7;
    unsigned dis : 1;                     /* group disabled */
    unsigned md : 16;                     /* metadata */
    struct dma_descr_group *up;
    union {
        struct dma_descr_context *context;
        struct dma_descr_group *group;
    } down;
} dma_descr_group;
70
// ---------------------------------------------------------- dma_descr_context
/* Guest-memory layout of a context descriptor. saved_data/saved_data_buf
   mirror the RW_SAVED_DATA/RW_SAVED_DATA_BUF channel registers when the
   context is loaded (channel_load_c). */
typedef struct dma_descr_context {
    uint32_t next;                        /* guest address of next context */
    unsigned eol : 1;                     /* end of list */
    unsigned : 3;
    unsigned intr : 1;                    /* raise interrupt when processed */
    unsigned : 1;
    unsigned store_mode : 1;
    unsigned en : 1;                      /* context enabled */
    unsigned : 7;
    unsigned dis : 1;                     /* set by the device at end-of-list */
    unsigned md0 : 16;                    /* metadata words */
    unsigned md1;
    unsigned md2;
    unsigned md3;
    unsigned md4;
    uint32_t saved_data;                  /* resume point: data descriptor address */
    uint32_t saved_data_buf;              /* resume point: buffer position */
} dma_descr_context;
90
// ------------------------------------------------------------- dma_descr_data
/* Guest-memory layout of a data descriptor: one buffer in the DMA list.
   The payload spans [buf, after). */
typedef struct dma_descr_data {
    uint32_t next;                        /* guest address of next data descriptor */
    uint32_t buf;                         /* start of the data buffer */
    unsigned eol : 1;                     /* end of descriptor list */
    unsigned : 2;
    unsigned out_eop : 1;                 /* signal end-of-packet on output */
    unsigned intr : 1;                    /* raise data interrupt when done */
    unsigned wait : 1;
    unsigned : 2;
    unsigned : 3;
    unsigned in_eop : 1;                  /* set by the device on input end-of-packet */
    unsigned : 4;
    unsigned md : 16;                     /* metadata, pushed to clients on output */
    uint32_t after;                       /* one past the end of valid data */
} dma_descr_data;
107
/* Constants */
/* Hardware-defined register field values (regk_*), used when decoding
   RW_CMD, RW_CFG and RW_STREAM_CMD writes. Note that several constants
   share a value; they belong to different register fields. */
enum {
    regk_dma_ack_pkt = 0x00000100,
    regk_dma_anytime = 0x00000001,
    regk_dma_array = 0x00000008,
    regk_dma_burst = 0x00000020,
    regk_dma_client = 0x00000002,
    regk_dma_copy_next = 0x00000010,
    regk_dma_copy_up = 0x00000020,
    regk_dma_data_at_eol = 0x00000001,
    regk_dma_dis_c = 0x00000010,
    regk_dma_dis_g = 0x00000020,
    regk_dma_idle = 0x00000001,
    regk_dma_intern = 0x00000004,
    regk_dma_load_c = 0x00000200,
    regk_dma_load_c_n = 0x00000280,
    regk_dma_load_c_next = 0x00000240,
    regk_dma_load_d = 0x00000140,
    regk_dma_load_g = 0x00000300,
    regk_dma_load_g_down = 0x000003c0,
    regk_dma_load_g_next = 0x00000340,
    regk_dma_load_g_up = 0x00000380,
    regk_dma_next_en = 0x00000010,
    regk_dma_next_pkt = 0x00000010,
    regk_dma_no = 0x00000000,
    regk_dma_only_at_wait = 0x00000000,
    regk_dma_restore = 0x00000020,
    regk_dma_rst = 0x00000001,
    regk_dma_running = 0x00000004,
    regk_dma_rw_cfg_default = 0x00000000,
    regk_dma_rw_cmd_default = 0x00000000,
    regk_dma_rw_intr_mask_default = 0x00000000,
    regk_dma_rw_stat_default = 0x00000101,
    regk_dma_rw_stream_cmd_default = 0x00000000,
    regk_dma_save_down = 0x00000020,
    regk_dma_save_up = 0x00000020,
    regk_dma_set_reg = 0x00000050,
    regk_dma_set_w_size1 = 0x00000190,
    regk_dma_set_w_size2 = 0x000001a0,
    regk_dma_set_w_size4 = 0x000001c0,
    regk_dma_stopped = 0x00000002,
    regk_dma_store_c = 0x00000002,
    regk_dma_store_descr = 0x00000000,
    regk_dma_store_g = 0x00000004,
    regk_dma_store_md = 0x00000001,
    regk_dma_sw = 0x00000008,
    regk_dma_update_down = 0x00000020,
    regk_dma_yes = 0x00000001
};
157
/* Channel run states; the values are the bit patterns reported in the
   low bits of RW_STAT (see dma_read, case RW_STAT). */
enum dma_ch_state
{
    RST = 1,
    STOPPED = 2,
    RUNNING = 4
};
164
/* Per-channel emulation state. */
struct fs_dma_channel
{
    qemu_irq irq;                         /* raised/lowered by channel_update_irq */
    struct etraxfs_dma_client *client;    /* attached device, may be NULL */

    /* Internal status. */
    int stream_cmd_src;
    enum dma_ch_state state;

    unsigned int input : 1;               /* 1 = input channel (device to memory) */
    unsigned int eol : 1;                 /* hit end-of-list on current descriptor */

    /* Shadow copies of the descriptors last loaded from guest memory. */
    struct dma_descr_group current_g;
    struct dma_descr_context current_c;
    struct dma_descr_data current_d;

    /* Control registers. */
    uint32_t regs[DMA_REG_MAX];
};
184
/* Whole-controller state: the MMIO window plus an array of channels.
   The bottom-half reschedules itself while any channel makes progress
   (see DMA_run). */
struct fs_dma_ctrl
{
    MemoryRegion mmio;                    /* nr_channels * 0x2000 byte register window */
    int nr_channels;
    struct fs_dma_channel *channels;      /* allocated in etraxfs_dmac_init */

    QEMUBH *bh;                           /* polling bottom-half driving DMA_run */
};
193
c01c07bb
EI
194static void DMA_run(void *opaque);
195static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);
196
1ba13a5d
EI
197static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
198{
199 return ctrl->channels[c].regs[reg];
200}
201
202static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
203{
204 return channel_reg(ctrl, c, RW_CFG) & 2;
205}
206
207static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
208{
209 return (channel_reg(ctrl, c, RW_CFG) & 1)
210 && ctrl->channels[c].client;
211}
212
a8170e5e 213static inline int fs_channel(hwaddr addr)
1ba13a5d
EI
214{
215 /* Every channel has a 0x2000 ctrl register map. */
8da3ff18 216 return addr >> 13;
1ba13a5d
EI
217}
218
d297f464 219#ifdef USE_THIS_DEAD_CODE
1ba13a5d
EI
/* Fetch the current group descriptor from guest memory into current_g.
   Only compiled when USE_THIS_DEAD_CODE is defined. */
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP);

    /* Load and decode. FIXME: handle endianness. */
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_g,
                              sizeof ctrl->channels[c].current_g);
}
229
/* Debug helper: print the interesting fields of a context descriptor. */
static void dump_c(int ch, struct dma_descr_context *c)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", c->next);
    printf("saved_data=%x\n", c->saved_data);
    printf("saved_data_buf=%x\n", c->saved_data_buf);
    printf("eol=%x\n", (uint32_t) c->eol);
}
238
/* Debug helper: print the interesting fields of a data descriptor. */
static void dump_d(int ch, struct dma_descr_data *d)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", d->next);
    printf("buf=%x\n", d->buf);
    printf("after=%x\n", d->after);
    printf("intr=%x\n", (uint32_t) d->intr);
    printf("out_eop=%x\n", (uint32_t) d->out_eop);
    printf("in_eop=%x\n", (uint32_t) d->in_eop);
    printf("eol=%x\n", (uint32_t) d->eol);
}
d297f464 250#endif
1ba13a5d
EI
251
/* Load the context descriptor pointed at by RW_GROUP_DOWN into current_c
   and mirror its saved_data/saved_data_buf resume points into the
   RW_SAVED_DATA/RW_SAVED_DATA_BUF registers. */
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Load and decode. FIXME: handle endianness. */
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_c,
                              sizeof ctrl->channels[c].current_c);

    D(dump_c(c, &ctrl->channels[c].current_c));
    /* I guess this should update the current pos. */
    ctrl->channels[c].regs[RW_SAVED_DATA] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}
268
/* Load the data descriptor pointed at by RW_SAVED_DATA into current_d.
   RW_DATA is updated to record which descriptor is currently loaded. */
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Load and decode. FIXME: handle endianness. */
    D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_d,
                              sizeof ctrl->channels[c].current_d);

    D(dump_d(c, &ctrl->channels[c].current_d));
    ctrl->channels[c].regs[RW_DATA] = addr;
}
282
283static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
284{
a8170e5e 285 hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
a8303d18
EI
286
287 /* Encode and store. FIXME: handle endianness. */
41107bcb 288 D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
a8303d18
EI
289 D(dump_d(c, &ctrl->channels[c].current_d));
290 cpu_physical_memory_write (addr,
291 (void *) &ctrl->channels[c].current_c,
292 sizeof ctrl->channels[c].current_c);
1ba13a5d
EI
293}
294
295static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
296{
a8170e5e 297 hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
1ba13a5d 298
a8303d18 299 /* Encode and store. FIXME: handle endianness. */
41107bcb 300 D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
1ba13a5d
EI
301 cpu_physical_memory_write (addr,
302 (void *) &ctrl->channels[c].current_d,
303 sizeof ctrl->channels[c].current_d);
304}
305
/* Stop a channel. Currently a stub — the state machine relies on the
   eol flag and dma_update_state instead. */
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
    /* FIXME: */
}
310
/* Put channel c into RUNNING state and, for output channels, push data
   immediately. Always schedules the polling bottom-half so input
   channels (and further output work) get serviced. */
static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].client)
    {
        ctrl->channels[c].eol = 0;
        ctrl->channels[c].state = RUNNING;
        /* Output channels drain synchronously; input channels wait for
           the client to push data via etraxfs_dmac_input. */
        if (!ctrl->channels[c].input)
            channel_out_run(ctrl, c);
    } else
        printf("WARNING: starting DMA ch %d with no client\n", c);

    qemu_bh_schedule_idle(ctrl->bh);
}
324
/* Handle a "continue" command (RW_CMD write): re-read the current data
   descriptor and, if the guest cleared its eol flag after we had stopped
   at end-of-list, resume the channel at the next descriptor. */
static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
    if (!channel_en(ctrl, c)
        || channel_stopped(ctrl, c)
        || ctrl->channels[c].state != RUNNING
        /* Only reload the current data descriptor if it has eol set. */
        || !ctrl->channels[c].current_d.eol) {
        D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
                 c, ctrl->channels[c].state,
                 channel_stopped(ctrl, c),
                 channel_en(ctrl,c),
                 ctrl->channels[c].eol));
        D(dump_d(c, &ctrl->channels[c].current_d));
        return;
    }

    /* Reload the current descriptor. */
    channel_load_d(ctrl, c);

    /* If the current descriptor cleared the eol flag and we had already
       reached eol state, do the continue. */
    if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
        D(printf("continue %d ok %x\n", c,
                 ctrl->channels[c].current_d.next));
        /* Step to the next descriptor and restart from its buffer. */
        ctrl->channels[c].regs[RW_SAVED_DATA] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
        channel_load_d(ctrl, c);
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;

        channel_start(ctrl, c);
    }
    /* NOTE(review): this resets RW_SAVED_DATA_BUF to the descriptor's buf
       even when the branch above already ran channel_start (which may have
       advanced it) — presumably intentional; verify against hardware docs. */
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}
360
/* Decode a write to RW_STREAM_CMD: the low 10 bits form the command.
   Only the load-descriptor commands are emulated. */
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
    unsigned int cmd = v & ((1 << 10) - 1);

    D(printf("%s ch=%d cmd=%x\n",
             __func__, c, cmd));
    if (cmd & regk_dma_load_d) {
        channel_load_d(ctrl, c);
        if (cmd & regk_dma_burst)
            channel_start(ctrl, c);
    }

    if (cmd & regk_dma_load_c) {
        channel_load_c(ctrl, c);
    }
}
377
/* Recompute the interrupt state of channel c: clear acknowledged bits
   from R_INTR, mask with RW_INTR_MASK into R_MASKED_INTR, and drive the
   IRQ line accordingly. */
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
    D(printf("%s %d\n", __func__, c));
    ctrl->channels[c].regs[R_INTR] &=
        ~(ctrl->channels[c].regs[RW_ACK_INTR]);

    ctrl->channels[c].regs[R_MASKED_INTR] =
        ctrl->channels[c].regs[R_INTR]
        & ctrl->channels[c].regs[RW_INTR_MASK];

    D(printf("%s: chan=%d masked_intr=%x\n", __func__,
             c,
             ctrl->channels[c].regs[R_MASKED_INTR]));

    /* Level-triggered: assert while any unmasked interrupt is pending. */
    qemu_set_irq(ctrl->channels[c].irq,
                 !!ctrl->channels[c].regs[R_MASKED_INTR]);
}
395
/* Drain an output channel: walk the data descriptor list, pushing each
   buffer (in chunks of at most 2 KiB) to the attached client until the
   end-of-list descriptor is reached. Descriptor metadata is pushed to
   the client at the start and again after each out_eop boundary.
   Returns 0 if the channel was already at eol, 1 otherwise. */
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
    uint32_t len;
    uint32_t saved_data_buf;
    unsigned char buf[2 * 1024];

    struct dma_context_metadata meta;
    bool send_context = true;

    if (ctrl->channels[c].eol)
        return 0;

    do {
        bool out_eop;
        D(printf("ch=%d buf=%x after=%x\n",
                 c,
                 (uint32_t)ctrl->channels[c].current_d.buf,
                 (uint32_t)ctrl->channels[c].current_d.after));

        /* Push the descriptor's metadata before its payload, once per
           packet (re-armed below when out_eop completes a packet). */
        if (send_context) {
            if (ctrl->channels[c].client->client.metadata_push) {
                meta.metadata = ctrl->channels[c].current_d.md;
                ctrl->channels[c].client->client.metadata_push(
                    ctrl->channels[c].client->client.opaque,
                    &meta);
            }
            send_context = false;
        }

        channel_load_d(ctrl, c);
        saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
        /* Remaining bytes in this descriptor: [saved_data_buf, after). */
        len = (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after;
        len -= saved_data_buf;

        if (len > sizeof buf)
            len = sizeof buf;
        cpu_physical_memory_read (saved_data_buf, buf, len);

        /* Only signal eop on the chunk that finishes the descriptor. */
        out_eop = ((saved_data_buf + len) ==
                   ctrl->channels[c].current_d.after) &&
            ctrl->channels[c].current_d.out_eop;

        D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
                 saved_data_buf, len, out_eop));

        if (ctrl->channels[c].client->client.push) {
            if (len > 0) {
                ctrl->channels[c].client->client.push(
                    ctrl->channels[c].client->client.opaque,
                    buf, len, out_eop);
            }
        } else {
            printf("WARNING: DMA ch%d dataloss,"
                   " no attached client.\n", c);
        }

        saved_data_buf += len;

        if (saved_data_buf == (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after) {
            /* Done. Step to next. */
            if (ctrl->channels[c].current_d.out_eop) {
                send_context = true;
            }
            if (ctrl->channels[c].current_d.intr) {
                /* data intr. */
                D(printf("signal intr %d eol=%d\n",
                         len, ctrl->channels[c].current_d.eol));
                ctrl->channels[c].regs[R_INTR] |= (1 << 2);
                channel_update_irq(ctrl, c);
            }
            channel_store_d(ctrl, c);
            if (ctrl->channels[c].current_d.eol) {
                D(printf("channel %d EOL\n", c));
                ctrl->channels[c].eol = 1;

                /* Mark the context as disabled. */
                ctrl->channels[c].current_c.dis = 1;
                channel_store_c(ctrl, c);

                channel_stop(ctrl, c);
            } else {
                /* Follow the list to the next descriptor. */
                ctrl->channels[c].regs[RW_SAVED_DATA] =
                    (uint32_t)(unsigned long)ctrl->
                    channels[c].current_d.next;
                /* Load new descriptor. */
                channel_load_d(ctrl, c);
                saved_data_buf = (uint32_t)(unsigned long)
                    ctrl->channels[c].current_d.buf;
            }

            ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                saved_data_buf;
            D(dump_d(c, &ctrl->channels[c].current_d));
        }
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    } while (!ctrl->channels[c].eol);
    return 1;
}
496
/* Feed up to buflen bytes of client data into an input channel's current
   data descriptor. On filling the descriptor (or on eop) the descriptor
   is finalized, interrupts are raised as configured, and the channel
   steps to the next descriptor or stops at end-of-list.
   Returns the number of bytes consumed (0 if the channel is at eol). */
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
                              unsigned char *buf, int buflen, int eop)
{
    uint32_t len;
    uint32_t saved_data_buf;

    if (ctrl->channels[c].eol == 1)
        return 0;

    channel_load_d(ctrl, c);
    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
    /* Free space left in this descriptor: [saved_data_buf, after). */
    len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
    len -= saved_data_buf;

    if (len > buflen)
        len = buflen;

    cpu_physical_memory_write (saved_data_buf, buf, len);
    saved_data_buf += len;

    if (saved_data_buf ==
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
        || eop) {
        uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

        D(printf("in dscr end len=%d\n",
                 ctrl->channels[c].current_d.after
                 - ctrl->channels[c].current_d.buf));
        /* Record how far the buffer was actually filled. */
        ctrl->channels[c].current_d.after = saved_data_buf;

        /* Done. Step to next. */
        if (ctrl->channels[c].current_d.intr) {
            /* TODO: signal eop to the client. */
            /* data intr. */
            ctrl->channels[c].regs[R_INTR] |= 3;
        }
        if (eop) {
            ctrl->channels[c].current_d.in_eop = 1;
            ctrl->channels[c].regs[R_INTR] |= 8;
        }
        /* Only touch the IRQ line if the pending set changed. */
        if (r_intr != ctrl->channels[c].regs[R_INTR])
            channel_update_irq(ctrl, c);

        channel_store_d(ctrl, c);
        D(dump_d(c, &ctrl->channels[c].current_d));

        if (ctrl->channels[c].current_d.eol) {
            D(printf("channel %d EOL\n", c));
            ctrl->channels[c].eol = 1;

            /* Mark the context as disabled. */
            ctrl->channels[c].current_c.dis = 1;
            channel_store_c(ctrl, c);

            channel_stop(ctrl, c);
        } else {
            ctrl->channels[c].regs[RW_SAVED_DATA] =
                (uint32_t)(unsigned long)ctrl->
                channels[c].current_d.next;
            /* Load new descriptor. */
            channel_load_d(ctrl, c);
            saved_data_buf = (uint32_t)(unsigned long)
                ctrl->channels[c].current_d.buf;
        }
    }

    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    return len;
}
566
1ab5f75c 567static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
1ba13a5d 568{
1ab5f75c 569 if (ctrl->channels[c].client->client.pull) {
1ba13a5d
EI
570 ctrl->channels[c].client->client.pull(
571 ctrl->channels[c].client->client.opaque);
1ab5f75c
EI
572 return 1;
573 } else
574 return 0;
1ba13a5d
EI
575}
576
/* Reject sub-word reads of the register window; hw_error does not return. */
static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
{
    hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
    return 0;
}
582
/* MMIO read handler. RW_STAT is synthesized from live channel state;
   everything else is read back from the regs[] shadow. */
static uint64_t
dma_read(void *opaque, hwaddr addr, unsigned int size)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int c;
    uint32_t r = 0;

    /* Only 32-bit accesses are supported (hw_error aborts otherwise). */
    if (size != 4) {
        dma_rinvalid(opaque, addr);
    }

    /* Make addr relative to this channel and bounded to nr regs. */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr)
    {
    case RW_STAT:
        /* Compose status from state, eol and stream command source. */
        r = ctrl->channels[c].state & 7;
        r |= ctrl->channels[c].eol << 5;
        r |= ctrl->channels[c].stream_cmd_src << 8;
        break;

    default:
        r = ctrl->channels[c].regs[addr];
        D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
                  __func__, c, addr));
        break;
    }
    return r;
}
614
/* Reject sub-word writes of the register window; hw_error does not return. */
static void
dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
{
    hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
}
620
4487fd34
EI
621static void
622dma_update_state(struct fs_dma_ctrl *ctrl, int c)
623{
d11cf8cc
EI
624 if (ctrl->channels[c].regs[RW_CFG] & 2)
625 ctrl->channels[c].state = STOPPED;
626 if (!(ctrl->channels[c].regs[RW_CFG] & 1))
627 ctrl->channels[c].state = RST;
4487fd34
EI
628}
629
/* MMIO write handler: dispatch per-register, updating the regs[] shadow
   and triggering the appropriate side effects (state update, continue,
   stream command, IRQ recompute). */
static void
dma_write(void *opaque, hwaddr addr,
          uint64_t val64, unsigned int size)
{
    struct fs_dma_ctrl *ctrl = opaque;
    uint32_t value = val64;
    int c;

    /* Only 32-bit accesses are supported (hw_error aborts otherwise). */
    if (size != 4) {
        dma_winvalid(opaque, addr, value);
    }

    /* Make addr relative to this channel and bounded to nr regs. */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr)
    {
    case RW_DATA:
        ctrl->channels[c].regs[addr] = value;
        break;

    case RW_CFG:
        ctrl->channels[c].regs[addr] = value;
        dma_update_state(ctrl, c);
        break;
    case RW_CMD:
        /* continue. */
        if (value & ~1)
            printf("Invalid store to ch=%d RW_CMD %x\n",
                   c, value);
        ctrl->channels[c].regs[addr] = value;
        channel_continue(ctrl, c);
        break;

    case RW_SAVED_DATA:
    case RW_SAVED_DATA_BUF:
    case RW_GROUP:
    case RW_GROUP_DOWN:
        ctrl->channels[c].regs[addr] = value;
        break;

    case RW_ACK_INTR:
    case RW_INTR_MASK:
        ctrl->channels[c].regs[addr] = value;
        channel_update_irq(ctrl, c);
        /* RW_ACK_INTR is write-one-to-clear; don't leave it latched. */
        if (addr == RW_ACK_INTR)
            ctrl->channels[c].regs[RW_ACK_INTR] = 0;
        break;

    case RW_STREAM_CMD:
        if (value & ~1023)
            printf("Invalid store to ch=%d "
                   "RW_STREAMCMD %x\n",
                   c, value);
        ctrl->channels[c].regs[addr] = value;
        D(printf("stream_cmd ch=%d\n", c));
        channel_stream_cmd(ctrl, c, value);
        break;

    default:
        D(printf ("%s c=%d " TARGET_FMT_plx "\n",
                  __func__, c, addr));
        break;
    }
}
696
9dcb06ce
EI
/* MMIO dispatch table for the controller's register window. 1-byte
   accesses are accepted by the core but rejected inside the handlers
   (dma_rinvalid/dma_winvalid). */
static const MemoryRegionOps dma_ops = {
    .read = dma_read,
    .write = dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4
    }
};
706
1ab5f75c 707static int etraxfs_dmac_run(void *opaque)
1ba13a5d
EI
708{
709 struct fs_dma_ctrl *ctrl = opaque;
710 int i;
711 int p = 0;
712
713 for (i = 0;
714 i < ctrl->nr_channels;
715 i++)
716 {
717 if (ctrl->channels[i].state == RUNNING)
718 {
1ab5f75c
EI
719 if (ctrl->channels[i].input) {
720 p += channel_in_run(ctrl, i);
721 } else {
722 p += channel_out_run(ctrl, i);
723 }
1ba13a5d
EI
724 }
725 }
1ab5f75c 726 return p;
1ba13a5d
EI
727}
728
/* Public entry for clients to push data into their input channel.
   Returns the number of bytes consumed (see channel_in_process). */
int etraxfs_dmac_input(struct etraxfs_dma_client *client,
                       void *buf, int len, int eop)
{
    return channel_in_process(client->ctrl, client->channel,
                              buf, len, eop);
}
735
/* Connect an IRQ line with a channel. */
/* "input" selects the channel direction: nonzero = device-to-memory. */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
    struct fs_dma_ctrl *ctrl = opaque;
    ctrl->channels[c].irq = *line;
    ctrl->channels[c].input = input;
}
743
/* Attach a client device to channel c and give the client back-references
   so it can call etraxfs_dmac_input later. */
void etraxfs_dmac_connect_client(void *opaque, int c,
                                 struct etraxfs_dma_client *cl)
{
    struct fs_dma_ctrl *ctrl = opaque;
    cl->ctrl = ctrl;
    cl->channel = c;
    ctrl->channels[c].client = cl;
}
752
753
/* Bottom-half body: poll all channels while the VM is running and
   reschedule ourselves as long as any channel reports progress
   (p starts at 1 so we also re-poll while the VM is paused). */
static void DMA_run(void *opaque)
{
    struct fs_dma_ctrl *etraxfs_dmac = opaque;
    int p = 1;

    if (runstate_is_running())
        p = etraxfs_dmac_run(etraxfs_dmac);

    if (p)
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
}
765
a8170e5e 766void *etraxfs_dmac_init(hwaddr base, int nr_channels)
1ba13a5d
EI
767{
768 struct fs_dma_ctrl *ctrl = NULL;
1ba13a5d 769
7267c094 770 ctrl = g_malloc0(sizeof *ctrl);
1ba13a5d 771
492c30af 772 ctrl->bh = qemu_bh_new(DMA_run, ctrl);
492c30af 773
1ba13a5d 774 ctrl->nr_channels = nr_channels;
7267c094 775 ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
1ba13a5d 776
2c9b15ca 777 memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
9dcb06ce
EI
778 nr_channels * 0x2000);
779 memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
780
1ba13a5d 781 return ctrl;
1ba13a5d 782}