/*
 * linux/include/asm-arm/arch-ixp4xx/io.h
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H

#include <asm/hardware.h>

#define IO_SPACE_LIMIT 0xffff0000

extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32 *data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);

/*
 * IXP4xx provides two methods of accessing PCI memory space:
 *
 * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
 *    To access PCI via this space, we simply ioremap() the BAR
 *    into the kernel and we can use the standard read[bwl]/write[bwl]
 *    macros. This is the preferred method due to speed, but it
 *    limits the system to just 64MB of PCI memory. This can be
 *    problematic if using video cards and other memory-heavy targets.
 *
 * 2) If > 64MB of memory space is required, the IXP4xx can be
 *    configured to use indirect registers to access PCI (as we do
 *    below for I/O transactions). This allows for up to 128MB
 *    (0x48000000 to 0x4fffffff) of memory on the bus. The disadvantage
 *    of this is that every PCI access requires three local register
 *    accesses plus a spinlock, but in some cases the performance hit
 *    is acceptable. In addition, you cannot mmap() PCI devices in
 *    this case.
 */
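
/*
 * Illustrative sketch (not part of the original header), assuming a
 * hypothetical PCI device with its registers in BAR 0: with the direct
 * mapped window, a driver simply ioremap()s the BAR and then uses the
 * standard accessors.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (regs) {
 *		u32 id = readl(regs);		read at BAR offset 0
 *		writel(0x1, regs + 0x4);	write at BAR offset 4
 *		iounmap(regs);
 *	}
 */
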
#ifndef CONFIG_IXP4XX_INDIRECT_PCI

#define __mem_pci(a)		(a)

#else

#include <linux/mm.h>

/*
 * In the case of indirect PCI, we simply return the actual PCI
 * address, and our read/write implementations use it to drive the
 * access registers. If something outside of PCI is ioremap'd, we
 * fall back to the default.
 */
static inline void __iomem *
__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
{
	if ((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
		return __arm_ioremap(addr, size, mtype);

	return (void __iomem *)addr;
}
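
/*
 * Illustrative example (not part of the original header), assuming
 * PCIBIOS_MIN_MEM marks the bottom of the PCI memory window:
 *
 *	__ixp4xx_ioremap(0x48000000, SZ_4K, mtype)
 *		returns 0x48000000 unchanged; it is a raw PCI bus address
 *		and the __ixp4xx_read*()/__ixp4xx_write*() helpers below
 *		drive it through the indirect access registers.
 *
 *	__ixp4xx_ioremap(0xc8000000, SZ_4K, mtype)
 *		falls outside the PCI range, so it is handed to
 *		__arm_ioremap() and gets a normal MMU mapping.
 */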

static inline void
__ixp4xx_iounmap(void __iomem *addr)
{
	if ((u32)addr >= VMALLOC_START)
		__iounmap(addr);
}

#define __arch_ioremap(a, s, f)	__ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a)	__ixp4xx_iounmap(a)

#define	writeb(v, p)		__ixp4xx_writeb(v, p)
#define	writew(v, p)		__ixp4xx_writew(v, p)
#define	writel(v, p)		__ixp4xx_writel(v, p)

#define	writesb(p, v, l)	__ixp4xx_writesb(p, v, l)
#define	writesw(p, v, l)	__ixp4xx_writesw(p, v, l)
#define	writesl(p, v, l)	__ixp4xx_writesl(p, v, l)

#define	readb(p)		__ixp4xx_readb(p)
#define	readw(p)		__ixp4xx_readw(p)
#define	readl(p)		__ixp4xx_readl(p)

#define	readsb(p, v, l)		__ixp4xx_readsb(p, v, l)
#define	readsw(p, v, l)		__ixp4xx_readsw(p, v, l)
#define	readsl(p, v, l)		__ixp4xx_readsl(p, v, l)

static inline void
__ixp4xx_writeb(u8 value, volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START) {
		__raw_writeb(value, addr);
		return;
	}

	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}
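
/*
 * Worked example (illustrative, not part of the original header): a byte
 * write to PCI address 0x48000002 has n = 2, so
 *
 *	byte_enables = (0xf & ~BIT(2)) << IXP4XX_PCI_NP_CBE_BESL
 *		     = 0xb << IXP4XX_PCI_NP_CBE_BESL
 *
 * leaving only byte lane 2 enabled (PCI byte enables are active low),
 * and the value is shifted into bits 23:16 of the data word before a
 * single indirect write cycle is issued via ixp4xx_pci_write().
 */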

static inline void
__ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
{
	while (count--)
		writeb(*vaddr++, bus_addr);
}

static inline void
__ixp4xx_writew(u16 value, volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START) {
		__raw_writew(value, addr);
		return;
	}

	n = addr % 4;
	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}

static inline void
__ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
{
	while (count--)
		writew(*vaddr++, bus_addr);
}

static inline void
__ixp4xx_writel(u32 value, volatile void __iomem *p)
{
	u32 addr = (u32)p;
	if (addr >= VMALLOC_START) {
		__raw_writel(value, addr);
		return;
	}

	ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
}

static inline void
__ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
{
	while (count--)
		writel(*vaddr++, bus_addr);
}

static inline unsigned char
__ixp4xx_readb(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START)
		return __raw_readb(addr);

	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
		return 0xff;

	return data >> (8*n);
}
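
/*
 * Note (not part of the original header): if the indirect read itself
 * fails (ixp4xx_pci_read() returns non-zero, e.g. the transaction was
 * aborted), the read helpers above and below return all ones
 * (0xff/0xffff/0xffffffff), which matches what a direct-mapped read
 * would see on a PCI master abort.
 */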

static inline void
__ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = readb(bus_addr);
}

static inline unsigned short
__ixp4xx_readw(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 n, byte_enables, data;

	if (addr >= VMALLOC_START)
		return __raw_readw(addr);

	n = addr % 4;
	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
		return 0xffff;

	return data >> (8*n);
}

static inline void
__ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = readw(bus_addr);
}

static inline unsigned long
__ixp4xx_readl(const volatile void __iomem *p)
{
	u32 addr = (u32)p;
	u32 data;

	if (addr >= VMALLOC_START)
		return __raw_readl(addr);

	if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
		return 0xffffffff;

	return data;
}

static inline void
__ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = readl(bus_addr);
}

/*
 * We can use the built-in functions because they end up calling
 * writeb()/readb() anyway.
 */
#define memset_io(c,v,l)		_memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)		_memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)		_memcpy_toio((c),(a),(l))

#endif /* CONFIG_IXP4XX_INDIRECT_PCI */

#ifndef CONFIG_PCI

#define	__io(v)		v

#else

/*
 * IXP4xx does not have a transparent CPU -> PCI I/O translation
 * window. Instead, it has a set of registers that must be tweaked
 * with the proper byte lanes, command types, and address for the
 * transaction. This means that we need to override the default
 * I/O functions.
 */
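
/*
 * Illustrative example (not part of the original header): a driver call
 * such as
 *
 *	outb(0x12, 0x3f8);
 *
 * ends up in __ixp4xx_outb(0x12, 0x3f8); n = 0x3f8 % 4 = 0, so only byte
 * lane 0 is enabled and a single indirect I/O write cycle is issued
 * through ixp4xx_pci_write() - there is no memory-mapped I/O window to
 * poke directly.
 */
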
#define	outb(v, p)		__ixp4xx_outb(v, p)
#define	outw(v, p)		__ixp4xx_outw(v, p)
#define	outl(v, p)		__ixp4xx_outl(v, p)

#define	outsb(p, v, l)		__ixp4xx_outsb(p, v, l)
#define	outsw(p, v, l)		__ixp4xx_outsw(p, v, l)
#define	outsl(p, v, l)		__ixp4xx_outsl(p, v, l)

#define	inb(p)			__ixp4xx_inb(p)
#define	inw(p)			__ixp4xx_inw(p)
#define	inl(p)			__ixp4xx_inl(p)

#define	insb(p, v, l)		__ixp4xx_insb(p, v, l)
#define	insw(p, v, l)		__ixp4xx_insw(p, v, l)
#define	insl(p, v, l)		__ixp4xx_insl(p, v, l)

static inline void
__ixp4xx_outb(u8 value, u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}

static inline void
__ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
{
	while (count--)
		outb(*vaddr++, io_addr);
}

static inline void
__ixp4xx_outw(u16 value, u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
	data = value << (8*n);
	ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}

static inline void
__ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
{
	while (count--)
		outw(cpu_to_le16(*vaddr++), io_addr);
}

static inline void
__ixp4xx_outl(u32 value, u32 addr)
{
	ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
}

static inline void
__ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
{
	while (count--)
		outl(*vaddr++, io_addr);
}

static inline u8
__ixp4xx_inb(u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
		return 0xff;

	return data >> (8*n);
}

static inline void
__ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = inb(io_addr);
}

static inline u16
__ixp4xx_inw(u32 addr)
{
	u32 n, byte_enables, data;
	n = addr % 4;
	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
		return 0xffff;

	return data >> (8*n);
}

static inline void
__ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = le16_to_cpu(inw(io_addr));
}

static inline u32
__ixp4xx_inl(u32 addr)
{
	u32 data;
	if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
		return 0xffffffff;

	return data;
}

static inline void
__ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
{
	while (count--)
		*vaddr++ = inl(io_addr);
}

#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL

#define	__is_io_address(p)	(((unsigned long)p >= PIO_OFFSET) && \
				 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
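
/*
 * Illustrative example (not part of the original header): ioport_map()
 * at the end of this file returns (port + PIO_OFFSET) as the __iomem
 * cookie, so for a hypothetical UART at I/O port 0x3f8:
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);	cookie 0x103f8
 *	u8 lsr = ioread8(p + 5);		port 0x3fd
 *
 * __is_io_address(0x103fd) is true, PIO_MASK strips the offset again,
 * and the access is routed to __ixp4xx_inb(0x3fd). Cookies outside the
 * [PIO_OFFSET, PIO_OFFSET + PIO_MASK] range are treated as PCI memory
 * addresses instead.
 */
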
static inline unsigned int
__ixp4xx_ioread8(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inb(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return (unsigned int)__raw_readb(port);
#else
		return (unsigned int)__ixp4xx_readb(addr);
#endif
}

static inline void
__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insb(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsb(addr, vaddr, count);
#else
		__ixp4xx_readsb(addr, vaddr, count);
#endif
}

static inline unsigned int
__ixp4xx_ioread16(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inw(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le16_to_cpu(__raw_readw((u32)port));
#else
		return (unsigned int)__ixp4xx_readw(addr);
#endif
}

static inline void
__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insw(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsw(addr, vaddr, count);
#else
		__ixp4xx_readsw(addr, vaddr, count);
#endif
}

static inline unsigned int
__ixp4xx_ioread32(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inl(port & PIO_MASK);
	else {
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le32_to_cpu(__raw_readl((u32)port));
#else
		return (unsigned int)__ixp4xx_readl(addr);
#endif
	}
}

static inline void
__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insl(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsl(addr, vaddr, count);
#else
		__ixp4xx_readsl(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite8(u8 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outb(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writeb(value, port);
#else
		__ixp4xx_writeb(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsb(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesb(addr, vaddr, count);
#else
		__ixp4xx_writesb(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite16(u16 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outw(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writew(cpu_to_le16(value), addr);
#else
		__ixp4xx_writew(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsw(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesw(addr, vaddr, count);
#else
		__ixp4xx_writesw(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite32(u32 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outl(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writel(cpu_to_le32(value), port);
#else
		__ixp4xx_writel(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsl(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesl(addr, vaddr, count);
#else
		__ixp4xx_writesl(addr, vaddr, count);
#endif
}

#define	ioread8(p)			__ixp4xx_ioread8(p)
#define	ioread16(p)			__ixp4xx_ioread16(p)
#define	ioread32(p)			__ixp4xx_ioread32(p)

#define	ioread8_rep(p, v, c)		__ixp4xx_ioread8_rep(p, v, c)
#define	ioread16_rep(p, v, c)		__ixp4xx_ioread16_rep(p, v, c)
#define	ioread32_rep(p, v, c)		__ixp4xx_ioread32_rep(p, v, c)

#define	iowrite8(v, p)			__ixp4xx_iowrite8(v, p)
#define	iowrite16(v, p)			__ixp4xx_iowrite16(v, p)
#define	iowrite32(v, p)			__ixp4xx_iowrite32(v, p)

#define	iowrite8_rep(p, v, c)		__ixp4xx_iowrite8_rep(p, v, c)
#define	iowrite16_rep(p, v, c)		__ixp4xx_iowrite16_rep(p, v, c)
#define	iowrite32_rep(p, v, c)		__ixp4xx_iowrite32_rep(p, v, c)

#define	ioport_map(port, nr)		((void __iomem *)((port) + PIO_OFFSET))
#define	ioport_unmap(addr)
#endif	/* !CONFIG_PCI */

#endif	/* __ASM_ARM_ARCH_IO_H */