]> git.proxmox.com Git - mirror_qemu.git/blame - hw/dma.c
changed cpu_x86_in/out to cpu_in/out
[mirror_qemu.git] / hw / dma.c
CommitLineData
27503323
FB
1/*
2 * QEMU DMA emulation
3 *
4 * Copyright (c) 2003 Vassili Karpov (malc)
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24#include <stdio.h>
25#include <stdlib.h>
26#include <inttypes.h>
27
28#include "vl.h"
29#include "cpu.h"
30
/* Unconditional error logging; every message is prefixed with "dma: ". */
#define log(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
/* Verbose log levels, compiled in only when DEBUG_DMA is defined. */
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

/* Translate a guest physical address to a host address by offsetting
   into the contiguous guest RAM block (phys_ram_base comes from vl.h).
   NOTE(review): the cast truncates the base to 32 bits — this assumes a
   32-bit host pointer; confirm before reusing on 64-bit builds. */
#define MEM_REAL(addr) ((addr)+(uint32_t)(phys_ram_base))
/* Element count of a statically-sized array (do not use on pointers). */
#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))
44
/* Per-channel state of one i8237 DMA channel. */
struct dma_regs {
    int now[2];        /* running state: [ADDR] start address,
                          [COUNT] units transferred so far */
    uint16_t base[2];  /* programmed base address ([ADDR]) and count ([COUNT]) */
    uint8_t mode;      /* mode register byte as written by the guest */
    uint8_t page;      /* page register: address bits 16..23 */
    uint8_t dack;      /* DACK polarity — stored, not used by this file */
    uint8_t eop;       /* EOP polarity — stored, not used by this file */
    DMA_read_handler read_handler;  /* device callback that performs the transfer */
    DMA_misc_handler misc_handler;  /* device callback, registered but not invoked here */
};

/* Indices into dma_regs.now[] / dma_regs.base[]. */
#define ADDR 0
#define COUNT 1
58
/* State of the two cascaded i8237 controllers:
   dma_controllers[0] serves channels 0-3 (8-bit),
   dma_controllers[1] serves channels 4-7 (16-bit, word-based addressing). */
static struct dma_cont {
    uint8_t status;     /* low nibble: terminal-count flags,
                           high nibble: pending DREQ bits */
    uint8_t command;    /* command register */
    uint8_t mask;       /* per-channel mask bits (1 = masked/disabled) */
    uint8_t flip_flop;  /* byte-pointer flip-flop for split 16-bit port accesses */
    struct dma_regs regs[4];
} dma_controllers[2];

/* i8237 command-register bits.  Only the default configuration is
   emulated; CMD_NOT_SUPPORTED collects every feature bit we reject. */
enum {
  CMD_MEMORY_TO_MEMORY = 0x01,
  CMD_FIXED_ADDRESS    = 0x02,
  CMD_BLOCK_CONTROLLER = 0x04,
  CMD_COMPRESSED_TIME  = 0x08,
  CMD_CYCLIC_PRIORITY  = 0x10,
  CMD_EXTENDED_WRITE   = 0x20,
  CMD_LOW_DREQ         = 0x40,
  CMD_LOW_DACK         = 0x80,
  CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
  | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
  | CMD_LOW_DREQ | CMD_LOW_DACK

};
81
82static void write_page (struct CPUX86State *env, uint32_t nport, uint32_t data)
83{
84 int ichan;
85 int ncont;
86 static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
87
88 ncont = nport > 0x87;
89 ichan = channels[nport - 0x80 - (ncont << 3)];
90
91 if (-1 == ichan) {
92 log ("invalid channel %#x %#x\n", nport, data);
93 return;
94 }
95
96 dma_controllers[ncont].regs[ichan].page = data;
97}
98
99static void init_chan (int ncont, int ichan)
100{
101 struct dma_regs *r;
102
103 r = dma_controllers[ncont].regs + ichan;
104 r->now[ADDR] = r->base[0] << ncont;
105 r->now[COUNT] = 0;
106}
107
108static inline int getff (int ncont)
109{
110 int ff;
111
112 ff = dma_controllers[ncont].flip_flop;
113 dma_controllers[ncont].flip_flop = !ff;
114 return ff;
115}
116
117static uint32_t read_chan (struct CPUX86State *env, uint32_t nport)
118{
119 int ff;
120 int ncont, ichan, nreg;
121 struct dma_regs *r;
122 int val;
123
124 ncont = nport > 7;
125 ichan = (nport >> (1 + ncont)) & 3;
126 nreg = (nport >> ncont) & 1;
127 r = dma_controllers[ncont].regs + ichan;
128
129 ff = getff (ncont);
130
131 if (nreg)
132 val = (r->base[COUNT] << ncont) - r->now[COUNT];
133 else
134 val = r->now[ADDR] + r->now[COUNT];
135
136 return (val >> (ncont + (ff << 3))) & 0xff;
137}
138
139static void write_chan (uint32_t nport, int size, uint32_t data)
140{
141 int ncont, ichan, nreg;
142 struct dma_regs *r;
143
144 ncont = nport > 7;
145 ichan = (nport >> (1 + ncont)) & 3;
146 nreg = (nport >> ncont) & 1;
147 r = dma_controllers[ncont].regs + ichan;
148
149 if (2 == size) {
150 r->base[nreg] = data;
151 init_chan (ncont, ichan);
152 }
153 else {
154 if (getff (ncont)) {
155 r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
156 init_chan (ncont, ichan);
157 }
158 else {
159 r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
160 }
161 }
162}
/* Byte-wide I/O write wrapper for the channel address/count ports. */
static void write_chanb (struct CPUX86State *env, uint32_t nport, uint32_t data)
{
    write_chan (nport, 1, data);
}
167
/* Word-wide I/O write wrapper for the channel address/count ports. */
static void write_chanw (struct CPUX86State *env, uint32_t nport, uint32_t data)
{
    write_chan (nport, 2, data);
}
172
173static void write_cont (struct CPUX86State *env, uint32_t nport, uint32_t data)
174{
175 int iport, ichan, ncont;
176 struct dma_cont *d;
177
178 ncont = nport > 0xf;
179 ichan = -1;
180
181 d = dma_controllers + ncont;
182 if (ncont) {
183 iport = ((nport - 0xd0) >> 1) + 8;
184 }
185 else {
186 iport = nport;
187 }
188
189 switch (iport) {
190 case 8: /* command */
191 if (data && (data | CMD_NOT_SUPPORTED)) {
192 log ("command %#x not supported\n", data);
193 goto error;
194 }
195 d->command = data;
196 break;
197
198 case 9:
199 ichan = data & 3;
200 if (data & 4) {
201 d->status |= 1 << (ichan + 4);
202 }
203 else {
204 d->status &= ~(1 << (ichan + 4));
205 }
206 d->status &= ~(1 << ichan);
207 break;
208
209 case 0xa: /* single mask */
210 if (data & 4)
211 d->mask |= 1 << (data & 3);
212 else
213 d->mask &= ~(1 << (data & 3));
214 break;
215
216 case 0xb: /* mode */
217 {
218#ifdef DMA_DEBUG
219 int op;
220 int ai;
221 int dir;
222 int opmode;
223
224 ichan = val & 3;
225 op = (val >> 2) & 3;
226 ai = (val >> 4) & 1;
227 dir = (val >> 5) & 1;
228 opmode = (val >> 6) & 3;
229
230 linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
231 ichan, op, ai, dir, opmode);
232#endif
233
234 d->regs[ichan].mode = data;
235 break;
236 }
237
238 case 0xc: /* clear flip flop */
239 d->flip_flop = 0;
240 break;
241
242 case 0xd: /* reset */
243 d->flip_flop = 0;
244 d->mask = ~0;
245 d->status = 0;
246 d->command = 0;
247 break;
248
249 case 0xe: /* clear mask for all channels */
250 d->mask = 0;
251 break;
252
253 case 0xf: /* write mask for all channels */
254 d->mask = data;
255 break;
256
257 default:
258 log ("dma: unknown iport %#x\n", iport);
259 goto error;
260 }
261
262#ifdef DMA_DEBUG
263 if (0xc != iport) {
264 linfo ("nport %#06x, ncont %d, ichan % 2d, val %#06x\n",
265 nport, d != dma_controllers, ichan, data);
266 }
267#endif
268 return;
269
270 error:
271 abort ();
272}
273
274int DMA_get_channel_mode (int nchan)
275{
276 return dma_controllers[nchan > 3].regs[nchan & 3].mode;
277}
278
279void DMA_hold_DREQ (int nchan)
280{
281 int ncont, ichan;
282
283 ncont = nchan > 3;
284 ichan = nchan & 3;
285 linfo ("held cont=%d chan=%d\n", ncont, ichan);
286 dma_controllers[ncont].status |= 1 << (ichan + 4);
287}
288
289void DMA_release_DREQ (int nchan)
290{
291 int ncont, ichan;
292
293 ncont = nchan > 3;
294 ichan = nchan & 3;
295 linfo ("released cont=%d chan=%d\n", ncont, ichan);
296 dma_controllers[ncont].status &= ~(1 << (ichan + 4));
297}
298
/* Execute one transfer on a single channel by invoking the registered
   device read handler, then raise the IRQ the handler requested (if any).
   NOTE(review): auto-init and direction bits of the mode register are
   parsed but deliberately ignored (commented out below) — confirm the
   registered handlers tolerate this before relying on it. */
static void channel_run (int ncont, int ichan)
{
    struct dma_regs *r;
    int n;
    int irq;
    uint32_t addr;
/*     int ai, dir; */

    r = dma_controllers[ncont].regs + ichan;
/*     ai = r->mode & 16; */
/*     dir = r->mode & 32 ? -1 : 1; */

    /* Host address of the channel buffer: page register supplies address
       bits 16..23; the low 16 bits come from the channel start address. */
    addr = MEM_REAL ((r->page << 16) | r->now[ADDR]);

    irq = -1;
    /* Offer the device up to (base count + 1) transfer units (shifted for
       16-bit channels); the handler returns the new transfer position and
       may set irq to request an interrupt. */
    n = r->read_handler (addr, (r->base[COUNT] << ncont) + (1 << ncont), &irq);
    r->now[COUNT] = n;

    ldebug ("dma_pos %d irq %d size %d\n",
            n, irq, (r->base[1] << ncont) + (1 << ncont));

    if (-1 != irq) {
        pic_set_irq (irq, 1);
    }
}
324
325void DMA_run (void)
326{
327 static int in_dma;
328 struct dma_cont *d;
329 int icont, ichan;
330
331 if (in_dma) {
332 log ("attempt to re-enter dma\n");
333 return;
334 }
335
336 in_dma = 1;
337 d = dma_controllers;
338
339 for (icont = 0; icont < 2; icont++, d++) {
340 for (ichan = 0; ichan < 4; ichan++) {
341 int mask;
342
343 mask = 1 << ichan;
344
345 if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
346 channel_run (icont, ichan);
347 }
348 }
349 in_dma = 0;
350}
351
352void DMA_register_channel (int nchan,
353 DMA_read_handler read_handler,
354 DMA_misc_handler misc_handler)
355{
356 struct dma_regs *r;
357 int ichan, ncont;
358
359 ncont = nchan > 3;
360 ichan = nchan & 3;
361
362 r = dma_controllers[ncont].regs + ichan;
363 r->read_handler = read_handler;
364 r->misc_handler = misc_handler;
365}
366
367void DMA_init (void)
368{
369 int i;
370 int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
371
372 for (i = 0; i < 8; i++) {
373 register_ioport_write (i, 1, write_chanb, 1);
374 register_ioport_write (i, 1, write_chanw, 2);
375
376 register_ioport_write (0xc0 + (i << 1), 1, write_chanb, 1);
377 register_ioport_write (0xc0 + (i << 1), 1, write_chanw, 2);
378
379 register_ioport_read (i, 1, read_chan, 1);
380 register_ioport_read (0xc0 + (i << 1), 1, read_chan, 2);
381 }
382
383 for (i = 0; i < LENOFA (page_port_list); i++) {
384 register_ioport_write (page_port_list[i] + 0x80, 1, write_page, 1);
385 register_ioport_write (page_port_list[i] + 0x88, 1, write_page, 1);
386 }
387
388 for (i = 0; i < 8; i++) {
389 register_ioport_write (i + 8, 1, write_cont, 1);
390 register_ioport_write (0xd0 + (i << 1), 1, write_cont, 1);
391 }
392
393 write_cont (NULL, 0xd, 0);
394 write_cont (NULL, 0xdd, 0);
395}