/* arch/arm/mach-rpc/dma.c — RiscPC DMA (Ubuntu hirsute kernel mirror) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mach-rpc/dma.c
 *
 *  Copyright (C) 1998 Russell King
 *
 *  DMA functions specific to RiscPC architecture
 */
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

/*
 * NOTE(review): several include lines were dropped by the scrape; the asm
 * headers below are restored because this file uses PAGE_MASK, DMA_MODE_READ,
 * the FIQ API (claim_fiq/set_fiq_handler/enable_fiq) and the IRQ_DMA* numbers.
 */
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <linux/uaccess.h>

#include <asm/mach/dma.h>
#include <asm/hardware/iomd.h>
26 struct dma_struct dma
;
28 unsigned long base
; /* Controller base address */
29 int irq
; /* Controller IRQ */
30 struct scatterlist cur_sg
; /* Current controller buffer */
#define TRANSFER_SIZE	2

/*
 * Register offsets relative to a channel's IOMD_IOxCURA base register.
 * NOTE(review): CURA (offset 0) was dropped by the scrape; it is restored
 * here because iomd_dma_handle() writes to base + CURA.
 */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
53 static void iomd_get_next_sg(struct scatterlist
*sg
, struct iomd_dma
*idma
)
55 unsigned long end
, offset
, flags
= 0;
58 sg
->dma_address
= idma
->dma_addr
;
59 offset
= sg
->dma_address
& ~PAGE_MASK
;
61 end
= offset
+ idma
->dma_len
;
66 if (offset
+ TRANSFER_SIZE
>= end
)
69 sg
->length
= end
- TRANSFER_SIZE
;
71 idma
->dma_len
-= end
- offset
;
72 idma
->dma_addr
+= end
- offset
;
74 if (idma
->dma_len
== 0) {
75 if (idma
->dma
.sgcount
> 1) {
76 idma
->dma
.sg
= sg_next(idma
->dma
.sg
);
77 idma
->dma_addr
= idma
->dma
.sg
->dma_address
;
78 idma
->dma_len
= idma
->dma
.sg
->length
;
86 flags
= DMA_END_S
| DMA_END_L
;
94 static irqreturn_t
iomd_dma_handle(int irq
, void *dev_id
)
96 struct iomd_dma
*idma
= dev_id
;
97 unsigned long base
= idma
->base
;
102 status
= iomd_readb(base
+ ST
);
103 if (!(status
& DMA_ST_INT
))
106 if ((idma
->state
^ status
) & DMA_ST_AB
)
107 iomd_get_next_sg(&idma
->cur_sg
, idma
);
109 switch (status
& (DMA_ST_OFL
| DMA_ST_AB
)) {
110 case DMA_ST_OFL
: /* OIA */
111 case DMA_ST_AB
: /* .IB */
112 iomd_writel(idma
->cur_sg
.dma_address
, base
+ CURA
);
113 iomd_writel(idma
->cur_sg
.length
, base
+ ENDA
);
114 idma
->state
= DMA_ST_AB
;
117 case DMA_ST_OFL
| DMA_ST_AB
: /* OIB */
119 iomd_writel(idma
->cur_sg
.dma_address
, base
+ CURB
);
120 iomd_writel(idma
->cur_sg
.length
, base
+ ENDB
);
125 if (status
& DMA_ST_OFL
&&
126 idma
->cur_sg
.length
== (DMA_END_S
|DMA_END_L
))
130 idma
->state
= ~DMA_ST_AB
;
136 static int iomd_request_dma(unsigned int chan
, dma_t
*dma
)
138 struct iomd_dma
*idma
= container_of(dma
, struct iomd_dma
, dma
);
140 return request_irq(idma
->irq
, iomd_dma_handle
,
141 0, idma
->dma
.device_id
, idma
);
144 static void iomd_free_dma(unsigned int chan
, dma_t
*dma
)
146 struct iomd_dma
*idma
= container_of(dma
, struct iomd_dma
, dma
);
148 free_irq(idma
->irq
, idma
);
151 static struct device isa_dma_dev
= {
152 .init_name
= "fallback device",
153 .coherent_dma_mask
= ~(dma_addr_t
)0,
154 .dma_mask
= &isa_dma_dev
.coherent_dma_mask
,
157 static void iomd_enable_dma(unsigned int chan
, dma_t
*dma
)
159 struct iomd_dma
*idma
= container_of(dma
, struct iomd_dma
, dma
);
160 unsigned long dma_base
= idma
->base
;
161 unsigned int ctrl
= TRANSFER_SIZE
| DMA_CR_E
;
163 if (idma
->dma
.invalid
) {
164 idma
->dma
.invalid
= 0;
167 * Cope with ISA-style drivers which expect cache
171 idma
->dma
.sg
= &idma
->dma
.buf
;
172 idma
->dma
.sgcount
= 1;
173 idma
->dma
.buf
.length
= idma
->dma
.count
;
174 idma
->dma
.buf
.dma_address
= dma_map_single(&isa_dma_dev
,
175 idma
->dma
.addr
, idma
->dma
.count
,
176 idma
->dma
.dma_mode
== DMA_MODE_READ
?
177 DMA_FROM_DEVICE
: DMA_TO_DEVICE
);
180 iomd_writeb(DMA_CR_C
, dma_base
+ CR
);
181 idma
->state
= DMA_ST_AB
;
184 if (idma
->dma
.dma_mode
== DMA_MODE_READ
)
187 iomd_writeb(ctrl
, dma_base
+ CR
);
188 enable_irq(idma
->irq
);
191 static void iomd_disable_dma(unsigned int chan
, dma_t
*dma
)
193 struct iomd_dma
*idma
= container_of(dma
, struct iomd_dma
, dma
);
194 unsigned long dma_base
= idma
->base
;
197 local_irq_save(flags
);
198 if (idma
->state
!= ~DMA_ST_AB
)
199 disable_irq(idma
->irq
);
200 iomd_writeb(0, dma_base
+ CR
);
201 local_irq_restore(flags
);
204 static int iomd_set_dma_speed(unsigned int chan
, dma_t
*dma
, int cycle
)
210 else if (cycle
<= 250)
212 else if (cycle
< 438)
217 tcr
= iomd_readb(IOMD_DMATCR
);
222 tcr
= (tcr
& ~0x03) | speed
;
226 tcr
= (tcr
& ~0x0c) | (speed
<< 2);
230 tcr
= (tcr
& ~0x30) | (speed
<< 4);
234 tcr
= (tcr
& ~0xc0) | (speed
<< 6);
241 iomd_writeb(tcr
, IOMD_DMATCR
);
246 static struct dma_ops iomd_dma_ops
= {
248 .request
= iomd_request_dma
,
249 .free
= iomd_free_dma
,
250 .enable
= iomd_enable_dma
,
251 .disable
= iomd_disable_dma
,
252 .setspeed
= iomd_set_dma_speed
,
255 static struct fiq_handler fh
= {
260 struct dma_struct dma
;
264 static void floppy_enable_dma(unsigned int chan
, dma_t
*dma
)
266 struct floppy_dma
*fdma
= container_of(dma
, struct floppy_dma
, dma
);
267 void *fiqhandler_start
;
268 unsigned int fiqhandler_length
;
274 if (fdma
->dma
.dma_mode
== DMA_MODE_READ
) {
275 extern unsigned char floppy_fiqin_start
, floppy_fiqin_end
;
276 fiqhandler_start
= &floppy_fiqin_start
;
277 fiqhandler_length
= &floppy_fiqin_end
- &floppy_fiqin_start
;
279 extern unsigned char floppy_fiqout_start
, floppy_fiqout_end
;
280 fiqhandler_start
= &floppy_fiqout_start
;
281 fiqhandler_length
= &floppy_fiqout_end
- &floppy_fiqout_start
;
284 regs
.ARM_r9
= fdma
->dma
.count
;
285 regs
.ARM_r10
= (unsigned long)fdma
->dma
.addr
;
286 regs
.ARM_fp
= (unsigned long)FLOPPYDMA_BASE
;
288 if (claim_fiq(&fh
)) {
289 printk("floppydma: couldn't claim FIQ.\n");
293 set_fiq_handler(fiqhandler_start
, fiqhandler_length
);
295 enable_fiq(fdma
->fiq
);
298 static void floppy_disable_dma(unsigned int chan
, dma_t
*dma
)
300 struct floppy_dma
*fdma
= container_of(dma
, struct floppy_dma
, dma
);
301 disable_fiq(fdma
->fiq
);
305 static int floppy_get_residue(unsigned int chan
, dma_t
*dma
)
312 static struct dma_ops floppy_dma_ops
= {
314 .enable
= floppy_enable_dma
,
315 .disable
= floppy_disable_dma
,
316 .residue
= floppy_get_residue
,
/*
 * This is virtual DMA - we don't need anything here.
 */
322 static void sound_enable_disable_dma(unsigned int chan
, dma_t
*dma
)
326 static struct dma_ops sound_dma_ops
= {
328 .enable
= sound_enable_disable_dma
,
329 .disable
= sound_enable_disable_dma
,
332 static struct iomd_dma iomd_dma
[6];
334 static struct floppy_dma floppy_dma
= {
336 .d_ops
= &floppy_dma_ops
,
338 .fiq
= FIQ_FLOPPYDATA
,
341 static dma_t sound_dma
= {
342 .d_ops
= &sound_dma_ops
,
345 static int __init
rpc_dma_init(void)
350 iomd_writeb(0, IOMD_IO0CR
);
351 iomd_writeb(0, IOMD_IO1CR
);
352 iomd_writeb(0, IOMD_IO2CR
);
353 iomd_writeb(0, IOMD_IO3CR
);
355 iomd_writeb(0xa0, IOMD_DMATCR
);
358 * Setup DMA channels 2,3 to be for podules
359 * and channels 0,1 for internal devices
361 iomd_writeb(DMA_EXT_IO3
|DMA_EXT_IO2
, IOMD_DMAEXT
);
363 iomd_dma
[DMA_0
].base
= IOMD_IO0CURA
;
364 iomd_dma
[DMA_0
].irq
= IRQ_DMA0
;
365 iomd_dma
[DMA_1
].base
= IOMD_IO1CURA
;
366 iomd_dma
[DMA_1
].irq
= IRQ_DMA1
;
367 iomd_dma
[DMA_2
].base
= IOMD_IO2CURA
;
368 iomd_dma
[DMA_2
].irq
= IRQ_DMA2
;
369 iomd_dma
[DMA_3
].base
= IOMD_IO3CURA
;
370 iomd_dma
[DMA_3
].irq
= IRQ_DMA3
;
371 iomd_dma
[DMA_S0
].base
= IOMD_SD0CURA
;
372 iomd_dma
[DMA_S0
].irq
= IRQ_DMAS0
;
373 iomd_dma
[DMA_S1
].base
= IOMD_SD1CURA
;
374 iomd_dma
[DMA_S1
].irq
= IRQ_DMAS1
;
376 for (i
= DMA_0
; i
<= DMA_S1
; i
++) {
377 iomd_dma
[i
].dma
.d_ops
= &iomd_dma_ops
;
379 ret
= isa_dma_add(i
, &iomd_dma
[i
].dma
);
381 printk("IOMDDMA%u: unable to register: %d\n", i
, ret
);
384 ret
= isa_dma_add(DMA_VIRTUAL_FLOPPY
, &floppy_dma
.dma
);
386 printk("IOMDFLOPPY: unable to register: %d\n", ret
);
387 ret
= isa_dma_add(DMA_VIRTUAL_SOUND
, &sound_dma
);
389 printk("IOMDSOUND: unable to register: %d\n", ret
);
392 core_initcall(rpc_dma_init
);