#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l}/write{b,w,l} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 * These may (will) be platform specific functions.
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 * For read{b,w,l} and write{b,w,l} there are also __raw versions, which
 * do not have a memory barrier after them.
 *
 * In addition, we have
 *	ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O,
 * which are processor specific.
 */
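/*
 * Illustrative sketch (not part of the original interface): how the two
 * families above are typically used from a driver.  The MMIO base, port
 * number and register offsets are assumptions made up for the example,
 * not real hardware values.
 */
#if 0	/* example only */
static void example_io_usage(void)
{
	void __iomem *regs = ioremap(0xfe800000, 0x100);	/* assumed MMIO window */
	u8 status;

	status = readb(regs + 0x04);	/* PCI/MMIO style, barriered */
	writeb(0x01, regs + 0x08);

	outb(0x80, 0x3f8 + 3);		/* ISA port style, via the machvec */
	status = inb(0x3f8 + 5);

	iounmap(regs);
}
#endif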

/*
 * We follow the Alpha convention here:
 *	__inb expands to an inline function call (which calls via the mv)
 *	_inb  is a real function call (note ___raw fns are _ version of __raw)
 *	inb   by default expands to _inb, but the machine specific code may
 *	      define it to __inb if it chooses.
 */
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__

/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __FUNCTION__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Since boards are able to define their own set of I/O routines through
 * their respective machine vector, we always wrap through the mv.
 *
 * Also, in the event that a board hasn't provided its own definition for
 * a given routine, it will be wrapped to generic code at run-time.
 */

#define __inb(p)	sh_mv.mv_inb((p))
#define __inw(p)	sh_mv.mv_inw((p))
#define __inl(p)	sh_mv.mv_inl((p))
#define __outb(x,p)	sh_mv.mv_outb((x),(p))
#define __outw(x,p)	sh_mv.mv_outw((x),(p))
#define __outl(x,p)	sh_mv.mv_outl((x),(p))

#define __inb_p(p)	sh_mv.mv_inb_p((p))
#define __inw_p(p)	sh_mv.mv_inw_p((p))
#define __inl_p(p)	sh_mv.mv_inl_p((p))
#define __outb_p(x,p)	sh_mv.mv_outb_p((x),(p))
#define __outw_p(x,p)	sh_mv.mv_outw_p((x),(p))
#define __outl_p(x,p)	sh_mv.mv_outl_p((x),(p))

#define __insb(p,b,c)	sh_mv.mv_insb((p), (b), (c))
#define __insw(p,b,c)	sh_mv.mv_insw((p), (b), (c))
#define __insl(p,b,c)	sh_mv.mv_insl((p), (b), (c))
#define __outsb(p,b,c)	sh_mv.mv_outsb((p), (b), (c))
#define __outsw(p,b,c)	sh_mv.mv_outsw((p), (b), (c))
#define __outsl(p,b,c)	sh_mv.mv_outsl((p), (b), (c))

#define __readb(a)	sh_mv.mv_readb((a))
#define __readw(a)	sh_mv.mv_readw((a))
#define __readl(a)	sh_mv.mv_readl((a))
#define __writeb(v,a)	sh_mv.mv_writeb((v),(a))
#define __writew(v,a)	sh_mv.mv_writew((v),(a))
#define __writel(v,a)	sh_mv.mv_writel((v),(a))

#define inb		__inb
#define inw		__inw
#define inl		__inl
#define outb		__outb
#define outw		__outw
#define outl		__outl

#define inb_p		__inb_p
#define inw_p		__inw_p
#define inl_p		__inl_p
#define outb_p		__outb_p
#define outw_p		__outw_p
#define outl_p		__outl_p

#define insb		__insb
#define insw		__insw
#define insl		__insl
#define outsb		__outsb
#define outsw		__outsw
#define outsl		__outsl

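/*
 * Sketch of the dispatch path described above (illustrative only; the
 * "myboard" names and base address are assumptions, and machine vector
 * registration details such as __initmv are omitted).  A board fills in
 * the mv_* hooks it needs; anything left unset falls back to the generic
 * routines at run time.
 */
#if 0	/* example only */
static unsigned char myboard_inb(unsigned long port)
{
	/* MYBOARD_PIO_BASE is a made-up base address for this sketch */
	return ctrl_inb(MYBOARD_PIO_BASE + port);
}

static struct sh_machine_vector mv_myboard = {
	.mv_name	= "myboard",
	.mv_inb		= myboard_inb,
};
/* with this in place, a driver's inb(p) expands to sh_mv.mv_inb(p) */
#endif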
#define __raw_readb(a)		__readb((void __iomem *)(a))
#define __raw_readw(a)		__readw((void __iomem *)(a))
#define __raw_readl(a)		__readl((void __iomem *)(a))
#define __raw_writeb(v, a)	__writeb(v, (void __iomem *)(a))
#define __raw_writew(v, a)	__writew(v, (void __iomem *)(a))
#define __raw_writel(v, a)	__writel(v, (void __iomem *)(a))

void __raw_writesl(unsigned long addr, const void *data, int longlen);
void __raw_readsl(unsigned long addr, void *data, int longlen);

/*
 * The platform header files may define some of these macros to use
 * the inlined versions where appropriate.  These macros may also be
 * redefined by userlevel programs.
 */
#ifdef __raw_readb
# define readb(a)	({ unsigned int r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a)	({ unsigned int r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a)	({ unsigned int r_ = __raw_readl(a); mb(); r_; })
#endif

#ifdef __raw_writeb
# define writeb(v,a)	({ __raw_writeb((v),(a)); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a)	({ __raw_writew((v),(a)); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a)	({ __raw_writel((v),(a)); mb(); })
#endif

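/*
 * Illustrative sketch of the barriered vs. __raw accessors (example only;
 * the FIFO and "go" register offsets are assumptions):
 */
#if 0	/* example only */
static void example_fill_fifo(void __iomem *regs, const u8 *buf, int len)
{
	int i;

	/* __raw_writeb: no mb() per access, cheaper for bulk transfers */
	for (i = 0; i < len; i++)
		__raw_writeb(buf[i], regs + 0x10);	/* assumed FIFO offset */
	mb();				/* order the fills explicitly */

	writeb(1, regs + 0x14);		/* barriered write, assumed "go" register */
}
#endif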
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)
#define writesl	__raw_writesl
#define readsl	__raw_readsl

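/*
 * Example use of the string accessors generated above (illustrative; the
 * 16-bit FIFO register and transfer length are assumptions).  Each call
 * repeatedly accesses the same register, moving data to/from the buffer:
 */
#if 0	/* example only */
static void example_drain_fifo(void __iomem *fifo, u16 *dst, unsigned int words)
{
	readsw(fifo, dst, words);	/* 'words' 16-bit reads from one register */
}
#endif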
#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)

/* Simple MMIO */
#define ioread8(a)		readb(a)
#define ioread16(a)		readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		writeb((v),(a))
#define iowrite16(v,a)		writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a, d, c)	readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)	writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	writesl((a), (s), (c))

#define mmiowb()	wmb()	/* synco on SH-4A, otherwise a nop */
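/*
 * Sketch of the usual mmiowb() pattern (illustrative; the device struct,
 * lock and register offset are assumptions, and the standard spinlock API
 * is taken as given): keep an MMIO write ordered before the lock release.
 */
#if 0	/* example only */
struct example_dev {
	spinlock_t	lock;
	void __iomem	*regs;
	u32		pending;
};

static void example_kick(struct example_dev *dev)
{
	spin_lock(&dev->lock);
	iowrite32(dev->pending, dev->regs + 0x20);	/* assumed doorbell */
	mmiowb();		/* order the write before the unlock */
	spin_unlock(&dev->lock);
}
#endif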

#define IO_SPACE_LIMIT 0xffffffff

/*
 * This function provides a method for the generic case where a board-specific
 * ioport_map simply needs to return the port + some arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	extern unsigned long generic_io_base;

	generic_io_base = pbase;
}

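/*
 * Sketch of how a board setup routine might use this (illustrative only;
 * the function name and port window address are assumptions):
 */
#if 0	/* example only */
static void __init myboard_setup(char **cmdline_p)
{
	/* after this, the generic ioport_map() is simply base + port */
	__set_io_port_base(0xb8000000);	/* assumed P2 address of the PIO window */
}
#endif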
#define __ioport_map(p, n)	sh_mv.mv_ioport_map((p), (n))

/* We really want to try and get these to memcpy etc */
extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
extern void memset_io(volatile void __iomem *, int, unsigned long);

/* SuperH on-chip I/O functions */
static inline unsigned char ctrl_inb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

static inline unsigned short ctrl_inw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

static inline unsigned int ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

static inline unsigned long long ctrl_inq(unsigned long addr)
{
	return *(volatile unsigned long long*)addr;
}

static inline void ctrl_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

static inline void ctrl_outw(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

static inline void ctrl_outl(unsigned int b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}

static inline void ctrl_outq(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long*)addr = b;
}

static inline void ctrl_delay(void)
{
#ifdef P2SEG
	ctrl_inw(P2SEG);
#endif
}

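/*
 * Example of the on-chip accessors above (illustrative; the register
 * address and bit value are made up, not a real SuperH SFR):
 */
#if 0	/* example only */
static void example_onchip_poke(void)
{
	unsigned short v;

	v = ctrl_inw(0xffc00010);		/* assumed control register */
	ctrl_outw(v | 0x0001, 0xffc00010);
	ctrl_delay();				/* dummy P2 read to settle the write */
}
#endif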
/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
			unsigned long flags);
void __iounmap(void __iomem *addr);

/* arch/sh/mm/ioremap_64.c */
unsigned long onchip_remap(unsigned long addr, unsigned long size,
			   const char *name);
extern void onchip_unmap(unsigned long vaddr);
#else
#define __ioremap(offset, size, flags)	((void __iomem *)(offset))
#define __iounmap(addr)			do { } while (0)
#define onchip_remap(addr, size, name)	(addr)
#define onchip_unmap(addr)		do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_SUPERH32
	unsigned long last_addr = offset + size - 1;
#endif
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

#ifdef CONFIG_SUPERH32
	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped.  Uncached accesses to P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}
#endif

	return __ioremap(offset, size, flags);
}

#define ioremap(offset, size)		\
	__ioremap_mode((offset), (size), 0)
#define ioremap_nocache(offset, size)	\
	__ioremap_mode((offset), (size), 0)
#define ioremap_cache(offset, size)	\
	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
#define p3_ioremap(offset, size, flags)	\
	__ioremap((offset), (size), (flags))
#define iounmap(addr)			\
	__iounmap((addr))

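/*
 * Typical driver-side use of the mappings above (illustrative sketch; the
 * physical address, size and register offset are assumptions):
 */
#if 0	/* example only */
static int example_probe(void)
{
	void __iomem *base = ioremap_nocache(0x1f000000, 0x1000);

	if (!base)
		return -ENOMEM;

	writel(0xdeadbeef, base + 0x40);	/* assumed device register */
	iounmap(base);
	return 0;
}
#endif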
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */