/*
 * arch/avr32/include/asm/io.h — AVR32 memory-mapped I/O accessors
 * (from the mirror_ubuntu-zesty-kernel tree)
 */
1 #ifndef __ASM_AVR32_IO_H
2 #define __ASM_AVR32_IO_H
3
4 #include <linux/bug.h>
5 #include <linux/kernel.h>
6 #include <linux/string.h>
7 #include <linux/types.h>
8
9 #include <asm/addrspace.h>
10 #include <asm/byteorder.h>
11
12 #include <mach/io.h>
13
/*
 * virt_to_phys - translate a kernel virtual address to a physical one.
 *
 * virt_to_phys will only work when address is in P1 or P2; the
 * translation itself is done by PHYSADDR() from <asm/addrspace.h>.
 */
static __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}
19
/*
 * phys_to_virt - translate a physical address to a kernel virtual one.
 *
 * The result is always a P1 (cached) segment address.
 */
static __inline__ void * phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
24
/*
 * Segment translation helpers:
 *  - cached_to_phys()/uncached_to_phys(): strip the segment bits via
 *    PHYSADDR(), yielding the physical address;
 *  - phys_to_cached():   map a physical address into P1 (cached);
 *  - phys_to_uncached(): map a physical address into P2 (uncached).
 */
#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
29
/*
 * Generic IO read/write. These perform native-endian accesses. Note
 * that some architectures will want to re-define __raw_{read,write}w.
 *
 * The string variants below transfer a whole buffer to or from a
 * single device address; per the parameter names, the counts are in
 * units of the access size.  They are implemented out of line.
 */
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);

extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
41
/*
 * Single native-endian MMIO stores.  The __force cast drops the
 * __iomem address-space annotation for static checkers (sparse);
 * the access itself is a plain volatile store.
 */
static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = v;
}
static inline void __raw_writew(u16 v, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = v;
}
static inline void __raw_writel(u32 v, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = v;
}
54
/*
 * Single native-endian MMIO loads, mirroring the stores above: the
 * __force cast strips the __iomem annotation and the read is a plain
 * volatile load.
 */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
67
/*
 * Convert I/O port address to virtual address.  Ports are memory
 * mapped: the port number is treated as a physical address and
 * accessed through the uncached (P2) segment.
 */
#ifndef __io
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)
78
/*
 * __BUILD_MEMORY_SINGLE - emit a pfx##write##bwl()/pfx##read##bwl()
 * MMIO accessor pair for one access size.
 *
 * Each generated accessor runs the address through
 * __swizzle_addr_##bwl() (a per-platform hook, presumably provided by
 * the <mach/io.h> included above) and the value through
 * pfx##ioswab##bwl() (per-platform byte swapping), then performs the
 * access through a plain volatile pointer.  The BUILD_BUG_ON rejects
 * instantiation with a type wider than a machine word.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}
106
/*
 * __BUILD_IOPORT_SINGLE - emit a pfx##out##bwl##p()/pfx##in##bwl##p()
 * port accessor pair.
 *
 * The port number is swizzled and then converted to an uncached
 * virtual address with __io().  'slow' is either empty or
 * SLOW_DOWN_IO and is executed after the access, producing the *_p
 * ("pausing") variants.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}
136
/*
 * Instantiate the accessor families for 8-, 16- and 32-bit accesses.
 * Each size gets a plain variant (readb/writeb, inb/outb, ...) and a
 * __mem_-prefixed variant that the string operations below build on.
 */
#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
159
/*
 * The _relaxed accessors are simple aliases of the ordinary ones; no
 * distinct weaker-ordering variant is provided here.
 */
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl

/*
 * The _be accessors map to the raw (native-endian) accessors, i.e. no
 * byte swapping is performed.
 */
#define readb_be			__raw_readb
#define readw_be			__raw_readw
#define readl_be			__raw_readl

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel

#define writeb_be			__raw_writeb
#define writew_be			__raw_writew
#define writel_be			__raw_writel
175
/*
 * __BUILD_MEMORY_STRING - emit writes##bwl()/reads##bwl(): transfer
 * 'count' items between a buffer and a single MMIO address.  Note the
 * device address is deliberately not incremented (FIFO-style
 * register).
 */
#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}
194
/*
 * __BUILD_IOPORT_STRING - emit outs##bwl()/ins##bwl(): the port-space
 * counterparts of the memory string operations, built on the
 * __mem_-prefixed in/out accessors.  The port is not incremented.
 */
#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			    unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}

/* Instantiate the string operations for all three access sizes. */
#define BUILDSTRING(bwl, type)						\
	__BUILD_MEMORY_STRING(bwl, type)				\
	__BUILD_IOPORT_STRING(bwl, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
221
/*
 * io{read,write}{8,16,32} macros in both le (for PCI style consumers)
 * and native be.  The "be" variants use the __raw_ accessors, so no
 * byte swapping is performed; the _rep variants forward to the string
 * operations defined above.
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif
252
253 static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
254 unsigned long count)
255 {
256 memcpy(to, (const void __force *)from, count);
257 }
258
259 static inline void memcpy_toio(volatile void __iomem *to, const void * from,
260 unsigned long count)
261 {
262 memcpy((void __force *)to, from, count);
263 }
264
265 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
266 unsigned long count)
267 {
268 memset((void __force *)addr, val, count);
269 }
270
/* mmiowb() expands to nothing on this architecture. */
#define mmiowb()

#define IO_SPACE_LIMIT	0xffffffff

/* Out-of-line mapping primitives backing the ioremap() macros below. */
extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);
278
/*
 * ioremap - map bus memory into CPU space
 * @offset bus address of the memory
 * @size size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers. The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

/*
 * All the flavours below pass flags == 0 and therefore behave exactly
 * like plain ioremap().
 */
#define ioremap_nocache(offset, size)		\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)

#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
#define ioremap_uc ioremap_nocache
301
/*
 * Map an arbitrary address into the cached (P1) or uncached (P2)
 * segment.
 */
#define cached(addr)	P1SEGADDR(addr)
#define uncached(addr)	P2SEGADDR(addr)

/* Bus and physical addresses are treated interchangeably here. */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page
309
/*
 * Create a virtual mapping cookie for an IO port range. There exists
 * no such thing as port-based I/O on AVR32, so a regular ioremap()
 * should do what we need.
 */
#define ioport_map(port, nr)	ioremap(port, nr)
#define ioport_unmap(port)	iounmap(port)
317
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)
323
/*
 * Convert a virtual cached pointer to an uncached pointer
 *
 * No conversion is needed here, but the argument is parenthesized so
 * the macro expands safely inside larger expressions.
 */
#define xlate_dev_kmem_ptr(p)	(p)
328
329 #endif /* __ASM_AVR32_IO_H */