/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing cheat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
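
/*
 * Illustrative sketch (not from this header; the base address below is
 * made up): platform setup code establishes the port base once, early
 * in boot, before any of the inb()/outb() accessors built further down
 * are used.
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		set_io_port_base((unsigned long)CKSEG1ADDR(0x18000000));
 *	}
 */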

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys	- map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
}
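
/*
 * Illustrative sketch (hypothetical buffer, not from this header):
 * virt_to_phys() is valid on directly-mapped memory such as a kmalloc()
 * allocation, but not on vmalloc() or ioremap() addresses.
 *
 *	void *buf = kmalloc(4096, GFP_KERNEL);
 *	unsigned long pa;
 *
 *	if (buf)
 *		pa = virt_to_phys(buf);
 */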

/*
 * phys_to_virt	- map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
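
/*
 * Illustrative sketch (the device base and register offsets are made
 * up): map a device's register window uncached, access it with the
 * readl()/writel() accessors built below, and tear the mapping down
 * with iounmap() when done.
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *
 *	if (regs) {
 *		u32 id = readl(regs + 0x04);
 *		writel(id | 0x1, regs + 0x08);
 *		iounmap(regs);
 *	}
 */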

/*
 * ioremap_nocache     -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()		wmb()
#else
#define war_octeon_io_reorder_wmb()		do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
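
/*
 * A sketch (not verbatim) of what the macro above generates for the
 * 8-bit case, with the swizzle/swab hooks left unexpanded:
 *
 *	static inline void writeb(u8 val, volatile void __iomem *mem)
 *	{
 *		volatile u8 *__mem;
 *
 *		war_octeon_io_reorder_wmb();
 *		__mem = (void *)__swizzle_addr_b((unsigned long)(mem));
 *		*__mem = ioswabb(__mem, val);
 *	}
 *
 * Only the 64-bit accessors on a 32-bit kernel take the assembly path,
 * which merges the two 32-bit register halves so the sd/ld is a single
 * bus access.
 */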

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
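
/*
 * A sketch (not verbatim) of the generated port accessor for the 8-bit,
 * non-slowed case: outb() is simply a byte store at mips_io_port_base +
 * port, since MIPS port I/O is memory mapped.
 *
 *	static inline void outb(u8 val, unsigned long port)
 *	{
 *		volatile u8 *__addr;
 *
 *		war_octeon_io_reorder_wmb();
 *		__addr = (void *)__swizzle_addr_b(mips_io_port_base + port);
 *		*__addr = ioswabb(__addr, val);
 *	}
 */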

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
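
/*
 * Illustrative sketch (the device mapping and register offset are
 * hypothetical): the string accessors repeat a single-register access,
 * e.g. draining a 16-bit data FIFO into a buffer:
 *
 *	u16 buf[64];
 *
 *	readsw(fifo_regs + FIFO_DATA, buf, ARRAY_SIZE(buf));
 */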

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
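
/*
 * A minimal usage sketch (lock, register and value are hypothetical):
 * mmiowb() orders a posted MMIO write ahead of the unlock that lets
 * another CPU enter the same critical section.
 *
 *	spin_lock(&dev_lock);
 *	writel(cmd, regs + CMD_REG);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 */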

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
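
/*
 * Illustrative sketch (window offset and blob are hypothetical): bulk
 * transfers into a mapped window go through memcpy_toio() rather than
 * a plain memcpy() on an __iomem pointer.
 *
 *	memcpy_toio(regs + SRAM_OFFSET, blob, blob_len);
 */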

/*
 * The caches on some architectures aren't dma-coherent, so coherence
 * has to be handled in software.  There are three types of operations
 * that can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    Unlike dma_cache_wback_inv() it does not invalidate the affected
 *    lines, which is sufficient before DMA transfers from memory to a
 *    device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
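
/*
 * A minimal sketch of the non-coherent case (buf, len and the helper
 * start_device_dma() are made up; this API is arch-internal): before a
 * device reads a buffer, write any dirty lines back so the DMA engine
 * sees current data.
 *
 *	dma_cache_wback((unsigned long)buf, len);
 *	start_device_dma(dev, virt_to_phys(buf), len);
 */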

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
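
/*
 * Worked example (csr_base is hypothetical): a 32-bit register in the
 * low half of a 64-bit bus word sits at byte offset 4 when the kernel
 * is big-endian, so
 *
 *	u32 status = csr_in32(csr_base + 0x10);
 *
 * loads from csr_base + 0x14 on a big-endian (__MIPSEB__) kernel and
 * from csr_base + 0x10 on a little-endian one.
 */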

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */