4 * Copyright (c) 2003 Vassili Karpov (malc)
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/* Logging helpers: all four "levels" currently print unconditionally to
 * stderr with a "dma: " prefix (no DEBUG gating visible in this excerpt).
 * NOTE(review): the name `log' collides with log() from <math.h> if that
 * header is ever pulled in -- consider a less generic name (e.g. dolog). */
#define log(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
/* Translate a guest physical address (page << 16 | offset, see channel_run)
 * into a host address by adding the guest RAM base.
 * NOTE(review): casting phys_ram_base (a pointer) to uint32_t truncates on
 * 64-bit hosts; uintptr_t would be safer -- confirm against the type of
 * `addr' at the call sites before changing. */
#define MEM_REAL(addr) ((addr)+(uint32_t)(phys_ram_base))
/* Number of elements in a statically-sized array, as an int for use in
 * loop bounds (see DMA_init).  Only valid on true arrays, never on
 * pointers/decayed parameters.  The macro argument is fully parenthesized
 * so expressions such as LENOFA(p->tab) expand safely. */
#define LENOFA(a) ((int) (sizeof(a) / sizeof((a)[0])))
/* (fragment of struct dma_regs -- the struct header and the other members
 * are outside this excerpt)  Per-channel device callbacks, installed by
 * DMA_register_channel(). */
DMA_read_handler read_handler;   /* invoked by channel_run to move a block */
DMA_misc_handler misc_handler;   /* stored but never called in the visible code */
/* Per-controller state; two controllers exist (see the `icont < 2' loop in
 * the DMA_run fragment below).  Other members (status, mask, flip_flop,
 * referenced throughout this file) are defined on lines not visible here. */
static struct dma_cont
{
    struct dma_regs regs[4];     /* one register set per DMA channel */
/* i8237 command-register bits (enum fragment; the enum header and the
 * CMD_LOW_DREQ / CMD_LOW_DACK definitions are on lines not visible in this
 * excerpt). */
CMD_MEMORY_TO_MEMORY = 0x01,
CMD_FIXED_ADDRESS = 0x02,
CMD_BLOCK_CONTROLLER = 0x04,
CMD_COMPRESSED_TIME = 0x08,
CMD_CYCLIC_PRIORITY = 0x10,
CMD_EXTENDED_WRITE = 0x20,
/* Mask of all command bits this emulation does not implement -- everything
 * except CMD_BLOCK_CONTROLLER (0x04).  Checked in write_cont's command
 * case. */
CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY
    | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME
    | CMD_CYCLIC_PRIORITY
    | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ
    | CMD_LOW_DACK
/* I/O write handler for the DMA page registers (ports 0x80-0x8f): stores
 * the top address byte (bits 16-23) for a channel.  Body is fragmentary --
 * declarations and the invalid-channel guard are on lines not visible in
 * this excerpt. */
static void write_page (struct CPUX86State *env, uint32_t nport, uint32_t data)
    /* Page-register port offsets are not in channel order; -1 marks port
     * offsets that are not wired to any channel. */
    static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
    /* ncont (0 or 1) selects the controller; its computation is not
     * visible here. */
    ichan = channels[nport - 0x80 - (ncont << 3)];
    /* invalid-port path (the guard itself is not visible): */
        log ("invalid channel %#x %#x\n", nport, data);
    dma_controllers[ncont].regs[ichan].page = data;
/* Reset a channel's working address from its programmed base after the
 * base register is (re)written.  The value is shifted left by ncont --
 * presumably because the second controller addresses 16-bit words; confirm
 * against the matching shifts in read_chan/channel_run. */
static void init_chan (int ncont, int ichan)
    r = dma_controllers[ncont].regs + ichan;
    r->now[ADDR] = r->base[0] << ncont;
/* Return the controller's byte-pointer flip-flop and toggle it.  The
 * flip-flop selects low vs high byte on successive 8-bit accesses to the
 * 16-bit address/count registers (see the `ff << 3' shift in read_chan). */
static inline int getff (int ncont)
    ff = dma_controllers[ncont].flip_flop;
    dma_controllers[ncont].flip_flop = !ff;
/* I/O read handler for a channel's address/count register: returns one
 * byte of the 16-bit value, selected by the controller's flip-flop.  The
 * branch selecting the COUNT vs ADDR computation is on lines not visible
 * in this excerpt. */
static uint32_t read_chan (struct CPUX86State *env, uint32_t nport)
    int ncont, ichan, nreg;
    /* Decode controller/channel/register from the port: controller-1 ports
     * are spaced two apart, hence the ncont-dependent shifts. */
    ichan = (nport >> (1 + ncont)) & 3;
    nreg = (nport >> ncont) & 1;
    r = dma_controllers[ncont].regs + ichan;
    /* (count register branch) programmed count minus transfer progress */
    val = (r->base[COUNT] << ncont) - r->now[COUNT];
    /* (address register branch) start address plus transfer progress */
    val = r->now[ADDR] + r->now[COUNT];
    /* pick low/high byte per flip-flop; >> ncont undoes the word scaling */
    return (val >> (ncont + (ff << 3))) & 0xff;
/* Write into a channel's base address/count register.  Three assignment
 * forms are visible; the switch/if structure choosing between them (driven
 * by `size' and presumably the flip-flop) is on lines not visible in this
 * excerpt. */
static void write_chan (uint32_t nport, int size, uint32_t data)
    int ncont, ichan, nreg;
    /* same port decoding as read_chan */
    ichan = (nport >> (1 + ncont)) & 3;
    nreg = (nport >> ncont) & 1;
    r = dma_controllers[ncont].regs + ichan;
    /* (one branch) replace the whole 16-bit register, then re-init */
    r->base[nreg] = data;
    init_chan (ncont, ichan);
    /* (one branch) set the high byte, keep the low byte, then re-init */
    r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
    init_chan (ncont, ichan);
    /* (one branch) set the low byte, keep the high byte */
    r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
/* ioport callback: 8-bit write to a channel register (size == 1). */
static void write_chanb (struct CPUX86State *env, uint32_t nport, uint32_t data)
    write_chan (nport, 1, data);
/* ioport callback: 16-bit write to a channel register (size == 2). */
static void write_chanw (struct CPUX86State *env, uint32_t nport, uint32_t data)
    write_chan (nport, 2, data);
/* I/O write handler for the controller (non-channel) registers: command,
 * request, masks, mode, flip-flop clear, reset.  The port-decoding branch
 * for controller 0 (ports 8..0xf) is on lines not visible here. */
static void write_cont (struct CPUX86State *env, uint32_t nport, uint32_t data)
    int iport, ichan, ncont;
    d = dma_controllers + ncont;
    /* controller 1's registers live at 0xd0.. spaced two apart; fold them
     * onto the same 8..0xf switch values used for controller 0 */
    iport = ((nport - 0xd0) >> 1) + 8;
190 case 8: /* command */
191 if (data
&& (data
| CMD_NOT_SUPPORTED
)) {
192 log ("command %#x not supported\n", data
);
    /* (case 9, request register -- the case label and data decoding are on
     * lines not visible here)  Status bits 4..7 are the per-channel
     * software request flags; bits 0..3 are terminal-count flags. */
    d->status |= 1 << (ichan + 4);
    d->status &= ~(1 << (ichan + 4));
    d->status &= ~(1 << ichan);
case 0xa: /* single mask */
    /* set or clear one channel's mask bit; bit 2 of data selects which
     * (the selecting branch is not visible here) */
    d->mask |= 1 << (data & 3);
    d->mask &= ~(1 << (data & 3));
    /* (case 0xb, mode register) decode transfer direction and operating
     * mode from the written byte */
    dir = (val >> 5) & 1;
    opmode = (val >> 6) & 3;
    linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
           ichan, op, ai, dir, opmode);
    d->regs[ichan].mode = data;
case 0xc: /* clear flip flop */
case 0xd: /* reset */
case 0xe: /* clear mask for all channels */
case 0xf: /* write mask for all channels */
    /* NOTE(review): the log macro already prefixes "dma: ", so this prints
     * a doubled "dma: dma: " prefix. */
    log ("dma: unknown iport %#x\n", iport);
    /* trace every controller-register write; (d != dma_controllers) is the
     * controller index (0 or 1) */
    linfo ("nport %#06x, ncont %d, ichan % 2d, val %#06x\n",
           nport, d != dma_controllers, ichan, data);
/* Public API: return the programmed mode byte for DMA channel nchan
 * (0..7); channels 4..7 belong to the second controller. */
int DMA_get_channel_mode (int nchan)
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
/* Public API: assert the DMA request line for channel nchan.  The
 * derivation of ncont/ichan from nchan is on lines not visible here. */
void DMA_hold_DREQ (int nchan)
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    /* request flags live in status bits 4..7 (tested by DMA_run) */
    dma_controllers[ncont].status |= 1 << (ichan + 4);
/* Public API: deassert the DMA request line for channel nchan -- the
 * inverse of DMA_hold_DREQ. */
void DMA_release_DREQ (int nchan)
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
/* Run one pending transfer on channel (ncont, ichan): build the host
 * address from the page register and current address, hand the whole
 * block to the owning device's read_handler, and raise an IRQ if the
 * handler asked for one.  Guards around the pic_set_irq call are on lines
 * not visible here. */
static void channel_run (int ncont, int ichan)
    r = dma_controllers[ncont].regs + ichan;
    /* ai = r->mode & 16; */
    /* dir = r->mode & 32 ? -1 : 1; */
    /* guest physical address: 64 KiB page in bits 16..23, channel address
     * below; MEM_REAL adds the host RAM base */
    addr = MEM_REAL ((r->page << 16) | r->now[ADDR]);
    /* transfer length: (base count << ncont) + (1 << ncont) -- presumably
     * because the 8237 count register holds count-1 and controller 1 moves
     * words; confirm against the device handlers */
    n = r->read_handler (addr, (r->base[COUNT] << ncont) + (1 << ncont), &irq);
    ldebug ("dma_pos %d irq %d size %d\n",
            n, irq, (r->base[1] << ncont) + (1 << ncont));
    /* the handler reported an interrupt line (the guard is not visible) */
    pic_set_irq (irq, 1);
/* (body fragment of DMA_run -- the function header and the re-entrancy
 * flag it tests are outside this excerpt)  Walk both controllers and all
 * four channels of each, running every channel that is unmasked and has
 * its request bit set. */
    log ("attempt to re-enter dma\n");
    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            /* runnable == mask bit clear AND request bit (status, shifted
             * up by 4) set; `mask' is presumably 1 << ichan -- its
             * assignment is not visible here */
            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
                channel_run (icont, ichan);
/* Public API: attach a device's transfer callbacks to DMA channel nchan.
 * The derivation of ncont/ichan from nchan is on lines not visible in
 * this excerpt. */
void DMA_register_channel (int nchan,
                           DMA_read_handler read_handler,
                           DMA_misc_handler misc_handler)
    r = dma_controllers[ncont].regs + ichan;
    r->read_handler = read_handler;
    r->misc_handler = misc_handler;
/* (fragment of DMA_init -- the function header and loop-closing braces are
 * on lines not visible here)  Register I/O port handlers for both
 * controllers, then reset them. */
    /* only these four page-register offsets are wired to channels (cf. the
     * channels[] table in write_page) */
    int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    for (i = 0; i < 8; i++) {
        /* controller 0: channel registers at ports 0..7, with separate
         * byte (size 1) and word (size 2) write handlers */
        register_ioport_write (i, 1, write_chanb, 1);
        register_ioport_write (i, 1, write_chanw, 2);
        /* controller 1: channel registers at 0xc0.., spaced two apart */
        register_ioport_write (0xc0 + (i << 1), 1, write_chanb, 1);
        register_ioport_write (0xc0 + (i << 1), 1, write_chanw, 2);
        register_ioport_read (i, 1, read_chan, 1);
        register_ioport_read (0xc0 + (i << 1), 1, read_chan, 2);
    /* page registers: 0x80.. for controller 0, 0x88.. for controller 1 */
    for (i = 0; i < LENOFA (page_port_list); i++) {
        register_ioport_write (page_port_list[i] + 0x80, 1, write_page, 1);
        register_ioport_write (page_port_list[i] + 0x88, 1, write_page, 1);
    /* controller registers: 8..0xf for controller 0, 0xd0.. for 1 */
    for (i = 0; i < 8; i++) {
        register_ioport_write (i + 8, 1, write_cont, 1);
        register_ioport_write (0xd0 + (i << 1), 1, write_cont, 1);
    /* master reset of both controllers (port 0xd / 0xdd) */
    write_cont (NULL, 0xd, 0);
    write_cont (NULL, 0xdd, 0);