]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - include/asm-arm/arch-ixp4xx/io.h
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
[mirror_ubuntu-hirsute-kernel.git] / include / asm-arm / arch-ixp4xx / io.h
1 /*
2 * linux/include/asm-arm/arch-ixp4xx/io.h
3 *
4 * Author: Deepak Saxena <dsaxena@plexity.net>
5 *
6 * Copyright (C) 2002-2005 MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #ifndef __ASM_ARM_ARCH_IO_H
14 #define __ASM_ARM_ARCH_IO_H
15
16 #include <asm/hardware.h>
17
/* Upper bound of the PCI I/O space addresses accepted by in*()/out*(). */
#define IO_SPACE_LIMIT 0xffff0000

/* Single-bit mask; used below to build the PCI byte-enable fields. */
#define BIT(x) ((1)<<(x))


/*
 * Indirect PCI accessors, provided by the platform PCI support code.
 * Callers below treat a non-zero return as a failed transaction.
 */
extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
25
26
27 /*
28 * IXP4xx provides two methods of accessing PCI memory space:
29 *
30 * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
31 * To access PCI via this space, we simply ioremap() the BAR
32 * into the kernel and we can use the standard read[bwl]/write[bwl]
33  *    macros. This is the preferred method due to speed but it
34 * limits the system to just 64MB of PCI memory. This can be
35  *    problematic if using video cards and other memory-heavy
36 * targets.
37 *
38 * 2) If > 64MB of memory space is required, the IXP4xx can be configured
39 * to use indirect registers to access PCI (as we do below for I/O
40 * transactions). This allows for up to 128MB (0x48000000 to 0x4fffffff)
41 * of memory on the bus. The disadvantage of this is that every
42 * PCI access requires three local register accesses plus a spinlock,
43 * but in some cases the performance hit is acceptable. In addition,
44 * you cannot mmap() PCI devices in this case.
45 *
46 */
#ifndef CONFIG_IXP4XX_INDIRECT_PCI

/* Direct-mapped PCI: an ioremap'd bus address is usable as-is. */
#define __mem_pci(a) (a)

#else

#include <linux/mm.h>
54
55 /*
56 * In the case of using indirect PCI, we simply return the actual PCI
57  * address and our read/write implementations use it to drive the
58  * access registers. If something outside of PCI is ioremap'd, we
59  * fall back to the default.
60 */
61 static inline void __iomem *
62 __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
63 {
64 if((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
65 return __arm_ioremap(addr, size, mtype);
66
67 return (void *)addr;
68 }
69
70 static inline void
71 __ixp4xx_iounmap(void __iomem *addr)
72 {
73 if ((u32)addr >= VMALLOC_START)
74 __iounmap(addr);
75 }
76
/* Route the arch remap hooks at the PCI-aware versions above. */
#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a) __ixp4xx_iounmap(a)

/*
 * Replace the generic MMIO accessors with the indirect-capable
 * implementations below (v = value, p = cookie, l = element count).
 */
#define writeb(v, p) __ixp4xx_writeb(v, p)
#define writew(v, p) __ixp4xx_writew(v, p)
#define writel(v, p) __ixp4xx_writel(v, p)

#define writesb(p, v, l) __ixp4xx_writesb(p, v, l)
#define writesw(p, v, l) __ixp4xx_writesw(p, v, l)
#define writesl(p, v, l) __ixp4xx_writesl(p, v, l)

#define readb(p) __ixp4xx_readb(p)
#define readw(p) __ixp4xx_readw(p)
#define readl(p) __ixp4xx_readl(p)

#define readsb(p, v, l) __ixp4xx_readsb(p, v, l)
#define readsw(p, v, l) __ixp4xx_readsw(p, v, l)
#define readsl(p, v, l) __ixp4xx_readsl(p, v, l)
95
/*
 * Single-byte MMIO write.  Cookies in vmalloc space are real mappings
 * from __arm_ioremap() and take the raw fast path; anything else is a
 * PCI bus address and is driven as an indirect controller transaction.
 */
static inline void
__ixp4xx_writeb(u8 value, volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START) {
		__raw_writeb(value, addr);
		return;
	}

	/*
	 * Clear only the addressed lane's bit in the byte-enable field
	 * (the enables are inverted) and shift the data onto that lane.
	 */
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}
112
113 static inline void
114 __ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
115 {
116 while (count--)
117 writeb(*vaddr++, bus_addr);
118 }
119
120 static inline void
121 __ixp4xx_writew(u16 value, volatile void __iomem *p)
122 {
123 u32 addr = (u32)p;
124 u32 n, byte_enables, data;
125
126 if (addr >= VMALLOC_START) {
127 __raw_writew(value, addr);
128 return;
129 }
130
131 n = addr % 4;
132 byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
133 data = value << (8*n);
134 ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
135 }
136
137 static inline void
138 __ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
139 {
140 while (count--)
141 writew(*vaddr++, bus_addr);
142 }
143
144 static inline void
145 __ixp4xx_writel(u32 value, volatile void __iomem *p)
146 {
147 u32 addr = (u32)p;
148 if (addr >= VMALLOC_START) {
149 __raw_writel(value, addr);
150 return;
151 }
152
153 ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
154 }
155
156 static inline void
157 __ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
158 {
159 while (count--)
160 writel(*vaddr++, bus_addr);
161 }
162
/*
 * Single-byte MMIO read.  Real (vmalloc) mappings use the raw access;
 * PCI bus addresses go through an indirect transaction.  Returns 0xff
 * if the PCI read fails, mimicking a bus that returns all-ones.
 */
static inline unsigned char
__ixp4xx_readb(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START)
		return __raw_readb(addr);

	/* Enable only the addressed lane (enables are inverted), then
	 * extract the byte from that lane of the 32-bit result. */
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
		return 0xff;

	return data >> (8*n);
}
179
180 static inline void
181 __ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
182 {
183 while (count--)
184 *vaddr++ = readb(bus_addr);
185 }
186
187 static inline unsigned short
188 __ixp4xx_readw(const volatile void __iomem *p)
189 {
190 u32 addr = (u32)p;
191 u32 n, byte_enables, data;
192
193 if (addr >= VMALLOC_START)
194 return __raw_readw(addr);
195
196 n = addr % 4;
197 byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
198 if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
199 return 0xffff;
200
201 return data>>(8*n);
202 }
203
204 static inline void
205 __ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
206 {
207 while (count--)
208 *vaddr++ = readw(bus_addr);
209 }
210
211 static inline unsigned long
212 __ixp4xx_readl(const volatile void __iomem *p)
213 {
214 u32 addr = (u32)p;
215 u32 data;
216
217 if (addr >= VMALLOC_START)
218 return __raw_readl(addr);
219
220 if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
221 return 0xffffffff;
222
223 return data;
224 }
225
226 static inline void
227 __ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
228 {
229 while (count--)
230 *vaddr++ = readl(bus_addr);
231 }
232
233
/*
 * We can use the built-in functions b/c they end up calling writeb/readb
 * (which are remapped above to the indirect-capable versions).
 */
#define memset_io(c,v,l) _memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l))

#endif
242
#ifndef CONFIG_PCI

/* No PCI: I/O-port addresses pass straight through. */
#define __io(v) v

#else
248
/*
 * IXP4xx does not have a transparent cpu -> PCI I/O translation
 * window. Instead, it has a set of registers that must be tweaked
 * with the proper byte lanes, command types, and address for the
 * transaction. This means that we need to override the default
 * I/O functions.
 *
 * Macro parameters follow the standard call convention: v = value,
 * p = port, l = element count.  (The out* macros previously named the
 * value argument "p" and the port "v", which was misleading.)
 */
#define outb(v, p) __ixp4xx_outb(v, p)
#define outw(v, p) __ixp4xx_outw(v, p)
#define outl(v, p) __ixp4xx_outl(v, p)

#define outsb(p, v, l) __ixp4xx_outsb(p, v, l)
#define outsw(p, v, l) __ixp4xx_outsw(p, v, l)
#define outsl(p, v, l) __ixp4xx_outsl(p, v, l)

#define inb(p) __ixp4xx_inb(p)
#define inw(p) __ixp4xx_inw(p)
#define inl(p) __ixp4xx_inl(p)

#define insb(p, v, l) __ixp4xx_insb(p, v, l)
#define insw(p, v, l) __ixp4xx_insw(p, v, l)
#define insl(p, v, l) __ixp4xx_insl(p, v, l)

272
/*
 * Byte write to PCI I/O space via an indirect transaction: enable only
 * the addressed lane (enables are inverted) and shift the data onto it.
 */
static inline void
__ixp4xx_outb(u8 value, u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}
282
283 static inline void
284 __ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
285 {
286 while (count--)
287 outb(*vaddr++, io_addr);
288 }
289
290 static inline void
291 __ixp4xx_outw(u16 value, u32 addr)
292 {
293 u32 n, byte_enables, data;
294 n = addr % 4;
295 byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
296 data = value << (8*n);
297 ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
298 }
299
300 static inline void
301 __ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
302 {
303 while (count--)
304 outw(cpu_to_le16(*vaddr++), io_addr);
305 }
306
/* 32-bit write to PCI I/O space: all byte lanes active, no shifting. */
static inline void
__ixp4xx_outl(u32 value, u32 addr)
{
	ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
}
312
313 static inline void
314 __ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
315 {
316 while (count--)
317 outl(*vaddr++, io_addr);
318 }
319
/*
 * Byte read from PCI I/O space via an indirect transaction.  Returns
 * 0xff if the PCI read fails (bus all-ones convention).
 */
static inline u8
__ixp4xx_inb(u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
		return 0xff;

	/* Extract the byte from its lane of the 32-bit result. */
	return data >> (8*n);
}
331
332 static inline void
333 __ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
334 {
335 while (count--)
336 *vaddr++ = inb(io_addr);
337 }
338
339 static inline u16
340 __ixp4xx_inw(u32 addr)
341 {
342 u32 n, byte_enables, data;
343 n = addr % 4;
344 byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
345 if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
346 return 0xffff;
347
348 return data>>(8*n);
349 }
350
351 static inline void
352 __ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
353 {
354 while (count--)
355 *vaddr++ = le16_to_cpu(inw(io_addr));
356 }
357
358 static inline u32
359 __ixp4xx_inl(u32 addr)
360 {
361 u32 data;
362 if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
363 return 0xffffffff;
364
365 return data;
366 }
367
368 static inline void
369 __ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
370 {
371 while (count--)
372 *vaddr++ = inl(io_addr);
373 }
374
/*
 * Cookies handed out by ioport_map() live in the range
 * [PIO_OFFSET, PIO_OFFSET + PIO_MASK]; the low 16 bits are the port.
 */
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL

/*
 * Fully parenthesize the macro argument: previously the cast bound
 * only to the first operand of an unparenthesized expression argument.
 */
#define __is_io_address(p) ((((unsigned long)(p)) >= PIO_OFFSET) && \
                            (((unsigned long)(p)) <= (PIO_MASK + PIO_OFFSET)))
/*
 * ioread8() backend: cookies inside the PIO window come from
 * ioport_map() and are routed to the indirect port read; anything
 * else is an MMIO cookie and uses the direct or indirect memory read,
 * depending on the PCI addressing mode selected at build time.
 */
static inline unsigned int
__ixp4xx_ioread8(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inb(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return (unsigned int)__raw_readb(port);
#else
		return (unsigned int)__ixp4xx_readb(addr);
#endif
}
393
/* ioread8_rep() backend: repeated byte reads from one port/address. */
static inline void
__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insb(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsb(addr, vaddr, count);
#else
		__ixp4xx_readsb(addr, vaddr, count);
#endif
}
407
/*
 * ioread16() backend.  Note the direct-MMIO path byte-swaps on
 * big-endian kernels (le16_to_cpu) to give little-endian register
 * semantics; the PIO and indirect paths handle this themselves.
 */
static inline unsigned int
__ixp4xx_ioread16(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inw(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le16_to_cpu(__raw_readw((u32)port));
#else
		return (unsigned int)__ixp4xx_readw(addr);
#endif
}
421
/* ioread16_rep() backend: repeated 16-bit reads from one port/address. */
static inline void
__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insw(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsw(addr, vaddr, count);
#else
		__ixp4xx_readsw(addr, vaddr, count);
#endif
}
435
/*
 * ioread32() backend.  As with ioread16, the direct-MMIO path applies
 * le32_to_cpu for little-endian register semantics.
 */
static inline unsigned int
__ixp4xx_ioread32(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inl(port & PIO_MASK);
	else {
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le32_to_cpu(__raw_readl((u32)port));
#else
		return (unsigned int)__ixp4xx_readl(addr);
#endif
	}
}
450
/* ioread32_rep() backend: repeated 32-bit reads from one port/address. */
static inline void
__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insl(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsl(addr, vaddr, count);
#else
		__ixp4xx_readsl(addr, vaddr, count);
#endif
}
464
/* iowrite8() backend: PIO cookies to the port path, MMIO otherwise. */
static inline void
__ixp4xx_iowrite8(u8 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outb(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writeb(value, port);
#else
		__ixp4xx_writeb(value, addr);
#endif
}
478
/* iowrite8_rep() backend: repeated byte writes to one port/address. */
static inline void
__ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsb(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesb(addr, vaddr, count);
#else
		__ixp4xx_writesb(addr, vaddr, count);
#endif
}
492
/*
 * iowrite16() backend.  The direct-MMIO path applies cpu_to_le16 for
 * little-endian register semantics on big-endian kernels.
 */
static inline void
__ixp4xx_iowrite16(u16 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outw(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writew(cpu_to_le16(value), addr);
#else
		__ixp4xx_writew(value, addr);
#endif
}
506
/* iowrite16_rep() backend: repeated 16-bit writes to one port/address. */
static inline void
__ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsw(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesw(addr, vaddr, count);
#else
		__ixp4xx_writesw(addr, vaddr, count);
#endif
}
520
/*
 * iowrite32() backend.  The direct-MMIO path applies cpu_to_le32 for
 * little-endian register semantics on big-endian kernels.
 */
static inline void
__ixp4xx_iowrite32(u32 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outl(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writel(cpu_to_le32(value), port);
#else
		__ixp4xx_writel(value, addr);
#endif
}
534
/* iowrite32_rep() backend: repeated 32-bit writes to one port/address. */
static inline void
__ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsl(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesl(addr, vaddr, count);
#else
		__ixp4xx_writesl(addr, vaddr, count);
#endif
}
548
/* Hook the generic iomap API at the PIO/MMIO dispatchers above. */
#define ioread8(p) __ixp4xx_ioread8(p)
#define ioread16(p) __ixp4xx_ioread16(p)
#define ioread32(p) __ixp4xx_ioread32(p)

#define ioread8_rep(p, v, c) __ixp4xx_ioread8_rep(p, v, c)
#define ioread16_rep(p, v, c) __ixp4xx_ioread16_rep(p, v, c)
#define ioread32_rep(p, v, c) __ixp4xx_ioread32_rep(p, v, c)

#define iowrite8(v,p) __ixp4xx_iowrite8(v,p)
#define iowrite16(v,p) __ixp4xx_iowrite16(v,p)
#define iowrite32(v,p) __ixp4xx_iowrite32(v,p)

#define iowrite8_rep(p, v, c) __ixp4xx_iowrite8_rep(p, v, c)
#define iowrite16_rep(p, v, c) __ixp4xx_iowrite16_rep(p, v, c)
#define iowrite32_rep(p, v, c) __ixp4xx_iowrite32_rep(p, v, c)

/*
 * Tag the port number into the PIO window so the dispatchers above can
 * tell port cookies from MMIO cookies.  The argument is parenthesized:
 * previously an expression argument would bind incorrectly to the +.
 * There is nothing to undo, so unmap is a no-op.
 */
#define ioport_map(port, nr) ((void __iomem*)((port) + PIO_OFFSET))
#define ioport_unmap(addr)
567 #endif // !CONFIG_PCI
568
569 #endif // __ASM_ARM_ARCH_IO_H
570