/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}

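/*
 * For example (a sketch only; the physical base below is made up), a
 * platform that decodes legacy port I/O at physical 0x18000000 could do
 *
 *	set_io_port_base((unsigned long)CKSEG1ADDR(0x18000000));
 *
 * after which outb(0x42, 0x3f8) becomes a plain byte store to the
 * virtual address mips_io_port_base + 0x3f8.
 */
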
/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

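/*
 * Typical use is ordering a plain memory update against a subsequent
 * MMIO doorbell write, e.g. (a hypothetical driver sketch; the register
 * names are made up):
 *
 *	desc->status = DESC_READY;	// plain memory store
 *	iobarrier_w();			// make it visible before the kick
 *	__raw_writel(1, regs + DOORBELL);
 */
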
/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}

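/*
 * A minimal sketch of the intended usage; the two only round-trip for
 * directly-mapped memory:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(pa) != buf);
 */
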
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

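/*
 * To illustrate the fast paths above (the address is made up): on a
 * 32-bit CPU a constant, uncached request entirely below 512MB needs
 * no TLB entry at all,
 *
 *	void __iomem *io = ioremap(0x1fc00000, 0x1000);
 *	// io == (void __iomem *)CKSEG1ADDR(0x1fc00000)
 *
 * while on a 64-bit CPU every physical address is already reachable
 * through the UNCAC_BASE/IO_BASE windows, again without __ioremap().
 */
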
/*
 * ioremap_prot - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cache(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * ioremap_wc - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may
 * vastly improve communication performance.  If it was determined at
 * boot time that the CPU CCA doesn't support UCA, this falls back to
 * the _CACHE_UNCACHED option (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)

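/*
 * Example (a sketch; the BAR address is hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(0x40000000, 1 << 20);
 *
 *	if (fb) {
 *		memset_io(fb, 0, 1 << 20);	// stores may be combined
 *		iounmap(fb);
 *	}
 */
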
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __writeq""\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"dsll32 %L0, %L0, 0"	"\n\t"			\
			"dsrl32 %L0, %L0, 0"	"\n\t"			\
			"dsll32 %M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __readq" "\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32 %M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

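/*
 * As a sketch of what the machinery above generates: instantiating
 * __BUILD_MEMORY_SINGLE(, l, u32, 1, 0, 1) produces roughly
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem =
 *			(void *)__swizzle_addr_l((unsigned long)(mem));
 *
 *		iobarrier_rw();
 *		*__mem = ioswabl(__mem, val);
 *	}
 *
 * i.e. a barrier, address mangling, optional byte swapping and a plain
 * 32-bit store; the inline-asm path only matters for the q accessors
 * on 32-bit kernels.
 */
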
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
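
/*
 * The instantiations above generate the whole inb()/inw()/inl() and
 * outb()/outw()/outl() family plus their _p ("pause") variants, which
 * on MIPS are identical to the plain ones; e.g. inw(port) is a 16-bit
 * load from mips_io_port_base + port followed by an rmb().
 */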

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)

#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif

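/*
 * The _relaxed variants differ from the plain accessors only in
 * dropping the read-side rmb(), so ordering against DMA'd memory is
 * not guaranteed.  Polling sketch (register and flag are hypothetical):
 *
 *	while (readl_relaxed(regs + STATUS) & BUSY)
 *		cpu_relax();
 */
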
#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif

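/*
 * Unlike memcpy_toio()/memcpy_fromio() below, the string accessors keep
 * targeting a single register or port, which is what FIFO-style
 * hardware expects.  Sketch (the FIFO offset is hypothetical):
 *
 *	u16 rx[32];
 *
 *	readsw(regs + FIFO, rx, ARRAY_SIZE(rx));
 */
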
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to outside.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
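
/*
 * Arch-internal sketch of the above (drivers must use the DMA API
 * instead, as noted):
 *
 *	dma_cache_wback((unsigned long)buf, len);   // before the device reads buf
 *	// ... device DMA into buf ...
 *	dma_cache_inv((unsigned long)buf, len);	    // before the CPU reads DMA'd data
 */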

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

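/*
 * I.e. on a big-endian bus the least-significant 32-bit word of an
 * 8-byte register sits 4 bytes in, so csr_in32(a) loads from a + 4,
 * while on little-endian it loads from a itself.
 */
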
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */