]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - include/asm-generic/io.h
io: define stronger ordering for the default readX() implementation
[mirror_ubuntu-eoan-kernel.git] / include / asm-generic / io.h
CommitLineData
3f7e212d
AB
1/* Generic I/O port emulation, based on MN10300 code
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#ifndef __ASM_GENERIC_IO_H
12#define __ASM_GENERIC_IO_H
13
14#include <asm/page.h> /* I/O is all done through memory accesses */
9216efaf 15#include <linux/string.h> /* for memset() and memcpy() */
3f7e212d
AB
16#include <linux/types.h>
17
18#ifdef CONFIG_GENERIC_IOMAP
19#include <asm-generic/iomap.h>
20#endif
21
66eab4df
MT
22#include <asm-generic/pci_iomap.h>
23
35dbc0e0 24#ifndef mmiowb
3f7e212d 25#define mmiowb() do {} while (0)
35dbc0e0 26#endif
3f7e212d 27
64e2c673
SK
28#ifndef __io_br
29#define __io_br() barrier()
30#endif
31
32/* prevent prefetching of coherent DMA data ahead of a dma-complete */
33#ifndef __io_ar
34#ifdef rmb
35#define __io_ar() rmb()
36#else
37#define __io_ar() barrier()
38#endif
39#endif
40
41/* flush writes to coherent DMA data before possibly triggering a DMA read */
42#ifndef __io_bw
43#ifdef wmb
44#define __io_bw() wmb()
45#else
46#define __io_bw() barrier()
47#endif
48#endif
49
50/* serialize device access against a spin_unlock, usually handled there. */
51#ifndef __io_aw
52#define __io_aw() barrier()
53#endif
54
55#ifndef __io_pbw
56#define __io_pbw() __io_bw()
57#endif
58
59#ifndef __io_paw
60#define __io_paw() __io_aw()
61#endif
62
63#ifndef __io_pbr
64#define __io_pbr() __io_br()
65#endif
66
67#ifndef __io_par
68#define __io_par() __io_ar()
69#endif
70
71
3f7e212d 72/*
9216efaf
TR
73 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
74 *
75 * On some architectures memory mapped IO needs to be accessed differently.
76 * On the simple architectures, we just read/write the memory location
77 * directly.
3f7e212d 78 */
9216efaf 79
35dbc0e0 80#ifndef __raw_readb
9216efaf 81#define __raw_readb __raw_readb
3f7e212d
AB
82static inline u8 __raw_readb(const volatile void __iomem *addr)
83{
9216efaf 84 return *(const volatile u8 __force *)addr;
3f7e212d 85}
35dbc0e0 86#endif
3f7e212d 87
35dbc0e0 88#ifndef __raw_readw
9216efaf 89#define __raw_readw __raw_readw
3f7e212d
AB
90static inline u16 __raw_readw(const volatile void __iomem *addr)
91{
9216efaf 92 return *(const volatile u16 __force *)addr;
3f7e212d 93}
35dbc0e0 94#endif
3f7e212d 95
35dbc0e0 96#ifndef __raw_readl
9216efaf 97#define __raw_readl __raw_readl
3f7e212d
AB
98static inline u32 __raw_readl(const volatile void __iomem *addr)
99{
9216efaf 100 return *(const volatile u32 __force *)addr;
3f7e212d 101}
35dbc0e0 102#endif
3f7e212d 103
9216efaf
TR
104#ifdef CONFIG_64BIT
105#ifndef __raw_readq
106#define __raw_readq __raw_readq
107static inline u64 __raw_readq(const volatile void __iomem *addr)
7292e7e0 108{
9216efaf 109 return *(const volatile u64 __force *)addr;
7292e7e0 110}
9216efaf
TR
111#endif
112#endif /* CONFIG_64BIT */
3f7e212d 113
35dbc0e0 114#ifndef __raw_writeb
9216efaf
TR
115#define __raw_writeb __raw_writeb
116static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
3f7e212d 117{
9216efaf 118 *(volatile u8 __force *)addr = value;
3f7e212d 119}
35dbc0e0 120#endif
3f7e212d 121
35dbc0e0 122#ifndef __raw_writew
9216efaf
TR
123#define __raw_writew __raw_writew
124static inline void __raw_writew(u16 value, volatile void __iomem *addr)
3f7e212d 125{
9216efaf 126 *(volatile u16 __force *)addr = value;
3f7e212d 127}
35dbc0e0 128#endif
3f7e212d 129
35dbc0e0 130#ifndef __raw_writel
9216efaf
TR
131#define __raw_writel __raw_writel
132static inline void __raw_writel(u32 value, volatile void __iomem *addr)
3f7e212d 133{
9216efaf 134 *(volatile u32 __force *)addr = value;
3f7e212d 135}
35dbc0e0 136#endif
3f7e212d 137
3f7e212d 138#ifdef CONFIG_64BIT
9216efaf
TR
139#ifndef __raw_writeq
140#define __raw_writeq __raw_writeq
141static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
3f7e212d 142{
9216efaf 143 *(volatile u64 __force *)addr = value;
3f7e212d 144}
cd248341 145#endif
9216efaf 146#endif /* CONFIG_64BIT */
cd248341 147
9216efaf
TR
148/*
149 * {read,write}{b,w,l,q}() access little endian memory and return result in
150 * native endianness.
151 */
3f7e212d 152
9216efaf
TR
153#ifndef readb
154#define readb readb
155static inline u8 readb(const volatile void __iomem *addr)
3f7e212d 156{
032d59e1
SK
157 u8 val;
158
159 __io_br();
160 val = __raw_readb(addr);
161 __io_ar();
162 return val;
3f7e212d 163}
3f7e212d
AB
164#endif
165
9216efaf
TR
166#ifndef readw
167#define readw readw
168static inline u16 readw(const volatile void __iomem *addr)
169{
032d59e1
SK
170 u16 val;
171
172 __io_br();
173 val = __le16_to_cpu(__raw_readw(addr));
174 __io_ar();
175 return val;
9216efaf 176}
7dc59bdd
G
177#endif
178
9216efaf
TR
179#ifndef readl
180#define readl readl
181static inline u32 readl(const volatile void __iomem *addr)
3f7e212d 182{
032d59e1
SK
183 u32 val;
184
185 __io_br();
186 val = __le32_to_cpu(__raw_readl(addr));
187 __io_ar();
188 return val;
3f7e212d 189}
9216efaf 190#endif
3f7e212d 191
9216efaf
TR
192#ifdef CONFIG_64BIT
193#ifndef readq
194#define readq readq
195static inline u64 readq(const volatile void __iomem *addr)
3f7e212d 196{
032d59e1
SK
197 u64 val;
198
199 __io_br();
200 val = __le64_to_cpu(__raw_readq(addr));
201 __io_ar();
202 return val;
3f7e212d 203}
9216efaf
TR
204#endif
205#endif /* CONFIG_64BIT */
3f7e212d 206
9216efaf
TR
207#ifndef writeb
208#define writeb writeb
209static inline void writeb(u8 value, volatile void __iomem *addr)
3f7e212d 210{
9216efaf 211 __raw_writeb(value, addr);
3f7e212d 212}
9216efaf 213#endif
3f7e212d 214
9216efaf
TR
215#ifndef writew
216#define writew writew
217static inline void writew(u16 value, volatile void __iomem *addr)
3f7e212d 218{
9216efaf 219 __raw_writew(cpu_to_le16(value), addr);
3f7e212d 220}
9216efaf 221#endif
3f7e212d 222
9216efaf
TR
223#ifndef writel
224#define writel writel
225static inline void writel(u32 value, volatile void __iomem *addr)
3f7e212d 226{
9216efaf 227 __raw_writel(__cpu_to_le32(value), addr);
3f7e212d 228}
9216efaf 229#endif
3f7e212d 230
9216efaf
TR
231#ifdef CONFIG_64BIT
232#ifndef writeq
233#define writeq writeq
234static inline void writeq(u64 value, volatile void __iomem *addr)
3f7e212d 235{
9216efaf 236 __raw_writeq(__cpu_to_le64(value), addr);
3f7e212d 237}
9216efaf
TR
238#endif
239#endif /* CONFIG_64BIT */
3f7e212d 240
1c8d2969
AB
241/*
242 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
243 * are not guaranteed to provide ordering against spinlocks or memory
244 * accesses.
245 */
246#ifndef readb_relaxed
247#define readb_relaxed readb
248#endif
249
250#ifndef readw_relaxed
251#define readw_relaxed readw
252#endif
253
254#ifndef readl_relaxed
255#define readl_relaxed readl
256#endif
257
e511267b 258#if defined(readq) && !defined(readq_relaxed)
1c8d2969
AB
259#define readq_relaxed readq
260#endif
261
9439eb3a
WD
262#ifndef writeb_relaxed
263#define writeb_relaxed writeb
264#endif
265
9439eb3a
WD
266#ifndef writew_relaxed
267#define writew_relaxed writew
268#endif
269
9439eb3a
WD
270#ifndef writel_relaxed
271#define writel_relaxed writel
272#endif
3f7e212d 273
e511267b 274#if defined(writeq) && !defined(writeq_relaxed)
1c8d2969
AB
275#define writeq_relaxed writeq
276#endif
277
9ab3a7a0
TR
278/*
279 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
280 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
281 */
282#ifndef readsb
283#define readsb readsb
284static inline void readsb(const volatile void __iomem *addr, void *buffer,
285 unsigned int count)
3f7e212d
AB
286{
287 if (count) {
288 u8 *buf = buffer;
9ab3a7a0 289
3f7e212d 290 do {
9ab3a7a0 291 u8 x = __raw_readb(addr);
3f7e212d
AB
292 *buf++ = x;
293 } while (--count);
294 }
295}
35dbc0e0 296#endif
3f7e212d 297
9ab3a7a0
TR
298#ifndef readsw
299#define readsw readsw
300static inline void readsw(const volatile void __iomem *addr, void *buffer,
301 unsigned int count)
3f7e212d
AB
302{
303 if (count) {
304 u16 *buf = buffer;
9ab3a7a0 305
3f7e212d 306 do {
9ab3a7a0 307 u16 x = __raw_readw(addr);
3f7e212d
AB
308 *buf++ = x;
309 } while (--count);
310 }
311}
35dbc0e0 312#endif
3f7e212d 313
9ab3a7a0
TR
314#ifndef readsl
315#define readsl readsl
316static inline void readsl(const volatile void __iomem *addr, void *buffer,
317 unsigned int count)
3f7e212d
AB
318{
319 if (count) {
320 u32 *buf = buffer;
9ab3a7a0 321
3f7e212d 322 do {
9ab3a7a0 323 u32 x = __raw_readl(addr);
3f7e212d
AB
324 *buf++ = x;
325 } while (--count);
326 }
327}
35dbc0e0 328#endif
3f7e212d 329
9ab3a7a0
TR
330#ifdef CONFIG_64BIT
331#ifndef readsq
332#define readsq readsq
333static inline void readsq(const volatile void __iomem *addr, void *buffer,
334 unsigned int count)
335{
336 if (count) {
337 u64 *buf = buffer;
338
339 do {
340 u64 x = __raw_readq(addr);
341 *buf++ = x;
342 } while (--count);
343 }
344}
345#endif
346#endif /* CONFIG_64BIT */
347
348#ifndef writesb
349#define writesb writesb
350static inline void writesb(volatile void __iomem *addr, const void *buffer,
351 unsigned int count)
3f7e212d
AB
352{
353 if (count) {
354 const u8 *buf = buffer;
9ab3a7a0 355
3f7e212d 356 do {
9ab3a7a0 357 __raw_writeb(*buf++, addr);
3f7e212d
AB
358 } while (--count);
359 }
360}
35dbc0e0 361#endif
3f7e212d 362
9ab3a7a0
TR
363#ifndef writesw
364#define writesw writesw
365static inline void writesw(volatile void __iomem *addr, const void *buffer,
366 unsigned int count)
3f7e212d
AB
367{
368 if (count) {
369 const u16 *buf = buffer;
9ab3a7a0 370
3f7e212d 371 do {
9ab3a7a0 372 __raw_writew(*buf++, addr);
3f7e212d
AB
373 } while (--count);
374 }
375}
35dbc0e0 376#endif
3f7e212d 377
9ab3a7a0
TR
378#ifndef writesl
379#define writesl writesl
380static inline void writesl(volatile void __iomem *addr, const void *buffer,
381 unsigned int count)
3f7e212d
AB
382{
383 if (count) {
384 const u32 *buf = buffer;
9ab3a7a0 385
3f7e212d 386 do {
9ab3a7a0 387 __raw_writel(*buf++, addr);
3f7e212d
AB
388 } while (--count);
389 }
390}
35dbc0e0 391#endif
3f7e212d 392
9ab3a7a0
TR
393#ifdef CONFIG_64BIT
394#ifndef writesq
395#define writesq writesq
396static inline void writesq(volatile void __iomem *addr, const void *buffer,
397 unsigned int count)
398{
399 if (count) {
400 const u64 *buf = buffer;
401
402 do {
403 __raw_writeq(*buf++, addr);
404 } while (--count);
405 }
406}
407#endif
408#endif /* CONFIG_64BIT */
3f7e212d 409
9216efaf
TR
410#ifndef PCI_IOBASE
411#define PCI_IOBASE ((void __iomem *)0)
412#endif
413
7dc59bdd
G
414#ifndef IO_SPACE_LIMIT
415#define IO_SPACE_LIMIT 0xffff
416#endif
3f7e212d 417
9216efaf
TR
418/*
419 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
420 * implemented on hardware that needs an additional delay for I/O accesses to
421 * take effect.
422 */
423
424#ifndef inb
425#define inb inb
426static inline u8 inb(unsigned long addr)
427{
428 return readb(PCI_IOBASE + addr);
429}
430#endif
431
432#ifndef inw
433#define inw inw
434static inline u16 inw(unsigned long addr)
435{
436 return readw(PCI_IOBASE + addr);
437}
438#endif
439
440#ifndef inl
441#define inl inl
442static inline u32 inl(unsigned long addr)
443{
444 return readl(PCI_IOBASE + addr);
445}
446#endif
447
448#ifndef outb
449#define outb outb
450static inline void outb(u8 value, unsigned long addr)
451{
452 writeb(value, PCI_IOBASE + addr);
453}
454#endif
455
456#ifndef outw
457#define outw outw
458static inline void outw(u16 value, unsigned long addr)
459{
460 writew(value, PCI_IOBASE + addr);
461}
462#endif
463
464#ifndef outl
465#define outl outl
466static inline void outl(u32 value, unsigned long addr)
467{
468 writel(value, PCI_IOBASE + addr);
469}
470#endif
471
472#ifndef inb_p
473#define inb_p inb_p
474static inline u8 inb_p(unsigned long addr)
475{
476 return inb(addr);
477}
478#endif
479
480#ifndef inw_p
481#define inw_p inw_p
482static inline u16 inw_p(unsigned long addr)
483{
484 return inw(addr);
485}
486#endif
487
488#ifndef inl_p
489#define inl_p inl_p
490static inline u32 inl_p(unsigned long addr)
491{
492 return inl(addr);
493}
494#endif
495
496#ifndef outb_p
497#define outb_p outb_p
498static inline void outb_p(u8 value, unsigned long addr)
499{
500 outb(value, addr);
501}
502#endif
503
504#ifndef outw_p
505#define outw_p outw_p
506static inline void outw_p(u16 value, unsigned long addr)
507{
508 outw(value, addr);
509}
510#endif
511
512#ifndef outl_p
513#define outl_p outl_p
514static inline void outl_p(u32 value, unsigned long addr)
515{
516 outl(value, addr);
517}
518#endif
519
9ab3a7a0
TR
520/*
521 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
522 * single I/O port multiple times.
523 */
524
525#ifndef insb
526#define insb insb
527static inline void insb(unsigned long addr, void *buffer, unsigned int count)
528{
529 readsb(PCI_IOBASE + addr, buffer, count);
530}
531#endif
532
533#ifndef insw
534#define insw insw
535static inline void insw(unsigned long addr, void *buffer, unsigned int count)
536{
537 readsw(PCI_IOBASE + addr, buffer, count);
538}
539#endif
540
541#ifndef insl
542#define insl insl
543static inline void insl(unsigned long addr, void *buffer, unsigned int count)
544{
545 readsl(PCI_IOBASE + addr, buffer, count);
546}
547#endif
548
549#ifndef outsb
550#define outsb outsb
551static inline void outsb(unsigned long addr, const void *buffer,
552 unsigned int count)
553{
554 writesb(PCI_IOBASE + addr, buffer, count);
555}
556#endif
557
558#ifndef outsw
559#define outsw outsw
560static inline void outsw(unsigned long addr, const void *buffer,
561 unsigned int count)
562{
563 writesw(PCI_IOBASE + addr, buffer, count);
564}
565#endif
566
567#ifndef outsl
568#define outsl outsl
569static inline void outsl(unsigned long addr, const void *buffer,
570 unsigned int count)
571{
572 writesl(PCI_IOBASE + addr, buffer, count);
573}
574#endif
575
/*
 * Paused string port I/O: the generic defaults add no delay and just
 * forward to the plain {in,out}s{b,w,l}() helpers.
 */
#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
626
9216efaf
TR
627#ifndef CONFIG_GENERIC_IOMAP
628#ifndef ioread8
629#define ioread8 ioread8
630static inline u8 ioread8(const volatile void __iomem *addr)
631{
632 return readb(addr);
633}
634#endif
635
636#ifndef ioread16
637#define ioread16 ioread16
638static inline u16 ioread16(const volatile void __iomem *addr)
639{
640 return readw(addr);
641}
642#endif
643
644#ifndef ioread32
645#define ioread32 ioread32
646static inline u32 ioread32(const volatile void __iomem *addr)
647{
648 return readl(addr);
649}
650#endif
651
9e44fb18
HG
652#ifdef CONFIG_64BIT
653#ifndef ioread64
654#define ioread64 ioread64
655static inline u64 ioread64(const volatile void __iomem *addr)
656{
657 return readq(addr);
658}
659#endif
660#endif /* CONFIG_64BIT */
661
9216efaf
TR
662#ifndef iowrite8
663#define iowrite8 iowrite8
664static inline void iowrite8(u8 value, volatile void __iomem *addr)
665{
666 writeb(value, addr);
667}
668#endif
669
670#ifndef iowrite16
671#define iowrite16 iowrite16
672static inline void iowrite16(u16 value, volatile void __iomem *addr)
673{
674 writew(value, addr);
675}
676#endif
677
678#ifndef iowrite32
679#define iowrite32 iowrite32
680static inline void iowrite32(u32 value, volatile void __iomem *addr)
681{
682 writel(value, addr);
683}
684#endif
685
9e44fb18
HG
686#ifdef CONFIG_64BIT
687#ifndef iowrite64
688#define iowrite64 iowrite64
689static inline void iowrite64(u64 value, volatile void __iomem *addr)
690{
691 writeq(value, addr);
692}
693#endif
694#endif /* CONFIG_64BIT */
695
9216efaf
TR
696#ifndef ioread16be
697#define ioread16be ioread16be
698static inline u16 ioread16be(const volatile void __iomem *addr)
699{
7a1aedba 700 return swab16(readw(addr));
9216efaf
TR
701}
702#endif
703
704#ifndef ioread32be
705#define ioread32be ioread32be
706static inline u32 ioread32be(const volatile void __iomem *addr)
707{
7a1aedba 708 return swab32(readl(addr));
9216efaf
TR
709}
710#endif
711
9e44fb18
HG
712#ifdef CONFIG_64BIT
713#ifndef ioread64be
714#define ioread64be ioread64be
715static inline u64 ioread64be(const volatile void __iomem *addr)
716{
717 return swab64(readq(addr));
718}
719#endif
720#endif /* CONFIG_64BIT */
721
9216efaf
TR
722#ifndef iowrite16be
723#define iowrite16be iowrite16be
724static inline void iowrite16be(u16 value, void volatile __iomem *addr)
725{
7a1aedba 726 writew(swab16(value), addr);
9216efaf
TR
727}
728#endif
729
730#ifndef iowrite32be
731#define iowrite32be iowrite32be
732static inline void iowrite32be(u32 value, volatile void __iomem *addr)
733{
7a1aedba 734 writel(swab32(value), addr);
9216efaf
TR
735}
736#endif
9ab3a7a0 737
9e44fb18
HG
738#ifdef CONFIG_64BIT
739#ifndef iowrite64be
740#define iowrite64be iowrite64be
741static inline void iowrite64be(u64 value, volatile void __iomem *addr)
742{
743 writeq(swab64(value), addr);
744}
745#endif
746#endif /* CONFIG_64BIT */
747
9ab3a7a0
TR
748#ifndef ioread8_rep
749#define ioread8_rep ioread8_rep
750static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
751 unsigned int count)
752{
753 readsb(addr, buffer, count);
754}
755#endif
756
757#ifndef ioread16_rep
758#define ioread16_rep ioread16_rep
759static inline void ioread16_rep(const volatile void __iomem *addr,
760 void *buffer, unsigned int count)
761{
762 readsw(addr, buffer, count);
763}
764#endif
765
766#ifndef ioread32_rep
767#define ioread32_rep ioread32_rep
768static inline void ioread32_rep(const volatile void __iomem *addr,
769 void *buffer, unsigned int count)
770{
771 readsl(addr, buffer, count);
772}
773#endif
774
9e44fb18
HG
775#ifdef CONFIG_64BIT
776#ifndef ioread64_rep
777#define ioread64_rep ioread64_rep
778static inline void ioread64_rep(const volatile void __iomem *addr,
779 void *buffer, unsigned int count)
780{
781 readsq(addr, buffer, count);
782}
783#endif
784#endif /* CONFIG_64BIT */
785
9ab3a7a0
TR
786#ifndef iowrite8_rep
787#define iowrite8_rep iowrite8_rep
788static inline void iowrite8_rep(volatile void __iomem *addr,
789 const void *buffer,
790 unsigned int count)
791{
792 writesb(addr, buffer, count);
793}
794#endif
795
796#ifndef iowrite16_rep
797#define iowrite16_rep iowrite16_rep
798static inline void iowrite16_rep(volatile void __iomem *addr,
799 const void *buffer,
800 unsigned int count)
801{
802 writesw(addr, buffer, count);
803}
804#endif
805
806#ifndef iowrite32_rep
807#define iowrite32_rep iowrite32_rep
808static inline void iowrite32_rep(volatile void __iomem *addr,
809 const void *buffer,
810 unsigned int count)
811{
812 writesl(addr, buffer, count);
813}
814#endif
9e44fb18
HG
815
816#ifdef CONFIG_64BIT
817#ifndef iowrite64_rep
818#define iowrite64_rep iowrite64_rep
819static inline void iowrite64_rep(volatile void __iomem *addr,
820 const void *buffer,
821 unsigned int count)
822{
823 writesq(addr, buffer, count);
824}
825#endif
826#endif /* CONFIG_64BIT */
9216efaf
TR
827#endif /* CONFIG_GENERIC_IOMAP */
828
3f7e212d
AB
829#ifdef __KERNEL__
830
831#include <linux/vmalloc.h>
9216efaf 832#define __io_virt(x) ((void __force *)(x))
3f7e212d
AB
833
834#ifndef CONFIG_GENERIC_IOMAP
3f7e212d 835struct pci_dev;
cd248341
JG
836extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
837
838#ifndef pci_iounmap
9216efaf 839#define pci_iounmap pci_iounmap
3f7e212d
AB
840static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
841{
842}
cd248341 843#endif
3f7e212d
AB
844#endif /* CONFIG_GENERIC_IOMAP */
845
846/*
847 * Change virtual addresses to physical addresses and vv.
848 * These are pretty trivial
849 */
cd248341 850#ifndef virt_to_phys
9216efaf 851#define virt_to_phys virt_to_phys
3f7e212d
AB
852static inline unsigned long virt_to_phys(volatile void *address)
853{
854 return __pa((unsigned long)address);
855}
9216efaf 856#endif
3f7e212d 857
9216efaf
TR
858#ifndef phys_to_virt
859#define phys_to_virt phys_to_virt
3f7e212d
AB
860static inline void *phys_to_virt(unsigned long address)
861{
862 return __va(address);
863}
cd248341 864#endif
3f7e212d 865
8c7ea50c
LR
866/**
867 * DOC: ioremap() and ioremap_*() variants
868 *
869 * If you have an IOMMU your architecture is expected to have both ioremap()
870 * and iounmap() implemented otherwise the asm-generic helpers will provide a
871 * direct mapping.
872 *
873 * There are ioremap_*() call variants, if you have no IOMMU we naturally will
874 * default to direct mapping for all of them, you can override these defaults.
875 * If you have an IOMMU you are highly encouraged to provide your own
876 * ioremap variant implementation as there currently is no safe architecture
877 * agnostic default. To avoid possible improper behaviour default asm-generic
878 * ioremap_*() variants all return NULL when an IOMMU is available. If you've
879 * defined your own ioremap_*() variant you must then declare your own
880 * ioremap_*() variant as defined to itself to avoid the default NULL return.
881 */
882
883#ifdef CONFIG_MMU
884
885#ifndef ioremap_uc
886#define ioremap_uc ioremap_uc
887static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
888{
889 return NULL;
890}
891#endif
892
893#else /* !CONFIG_MMU */
894
3f7e212d
AB
895/*
896 * Change "struct page" to physical address.
f1ecc698
JB
897 *
898 * This implementation is for the no-MMU case only... if you have an MMU
899 * you'll need to provide your own definitions.
3f7e212d 900 */
9216efaf 901
9216efaf
TR
902#ifndef ioremap
903#define ioremap ioremap
904static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
3f7e212d 905{
9216efaf 906 return (void __iomem *)(unsigned long)offset;
3f7e212d 907}
9216efaf 908#endif
3f7e212d 909
9216efaf
TR
910#ifndef __ioremap
911#define __ioremap __ioremap
912static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
913 unsigned long flags)
914{
915 return ioremap(offset, size);
916}
917#endif
3f7e212d
AB
918
919#ifndef ioremap_nocache
9216efaf
TR
920#define ioremap_nocache ioremap_nocache
921static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
922{
923 return ioremap(offset, size);
924}
3f7e212d
AB
925#endif
926
e4b6be33
LR
927#ifndef ioremap_uc
928#define ioremap_uc ioremap_uc
929static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
930{
931 return ioremap_nocache(offset, size);
932}
933#endif
934
3f7e212d 935#ifndef ioremap_wc
9216efaf
TR
936#define ioremap_wc ioremap_wc
937static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
938{
939 return ioremap_nocache(offset, size);
940}
3f7e212d
AB
941#endif
942
d838270e
TK
943#ifndef ioremap_wt
944#define ioremap_wt ioremap_wt
945static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
946{
947 return ioremap_nocache(offset, size);
948}
949#endif
950
9216efaf
TR
951#ifndef iounmap
952#define iounmap iounmap
d838270e 953
e66d3c49 954static inline void iounmap(void __iomem *addr)
3f7e212d
AB
955{
956}
9216efaf 957#endif
f1ecc698 958#endif /* CONFIG_MMU */
3f7e212d 959
ce816fa8 960#ifdef CONFIG_HAS_IOPORT_MAP
3f7e212d 961#ifndef CONFIG_GENERIC_IOMAP
9216efaf
TR
962#ifndef ioport_map
963#define ioport_map ioport_map
3f7e212d
AB
964static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
965{
112eeaa7 966 return PCI_IOBASE + (port & IO_SPACE_LIMIT);
3f7e212d 967}
9216efaf 968#endif
3f7e212d 969
9216efaf
TR
970#ifndef ioport_unmap
971#define ioport_unmap ioport_unmap
3f7e212d
AB
972static inline void ioport_unmap(void __iomem *p)
973{
974}
9216efaf 975#endif
3f7e212d
AB
976#else /* CONFIG_GENERIC_IOMAP */
977extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
978extern void ioport_unmap(void __iomem *p);
979#endif /* CONFIG_GENERIC_IOMAP */
ce816fa8 980#endif /* CONFIG_HAS_IOPORT_MAP */
3f7e212d 981
eabc2a7c
AS
982/*
983 * Convert a virtual cached pointer to an uncached pointer
984 */
576ebd74 985#ifndef xlate_dev_kmem_ptr
9216efaf
TR
986#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
987static inline void *xlate_dev_kmem_ptr(void *addr)
988{
989 return addr;
990}
576ebd74 991#endif
9216efaf 992
576ebd74 993#ifndef xlate_dev_mem_ptr
9216efaf
TR
994#define xlate_dev_mem_ptr xlate_dev_mem_ptr
995static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
996{
997 return __va(addr);
998}
999#endif
1000
1001#ifndef unxlate_dev_mem_ptr
1002#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
1003static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
1004{
1005}
576ebd74 1006#endif
3f7e212d 1007
c93d0312 1008#ifdef CONFIG_VIRT_TO_BUS
3f7e212d 1009#ifndef virt_to_bus
9216efaf 1010static inline unsigned long virt_to_bus(void *address)
3f7e212d 1011{
9216efaf 1012 return (unsigned long)address;
3f7e212d
AB
1013}
1014
1015static inline void *bus_to_virt(unsigned long address)
1016{
9216efaf 1017 return (void *)address;
3f7e212d
AB
1018}
1019#endif
c93d0312 1020#endif
3f7e212d 1021
cd248341 1022#ifndef memset_io
9216efaf 1023#define memset_io memset_io
c2327da0
AS
1024/**
1025 * memset_io Set a range of I/O memory to a constant value
1026 * @addr: The beginning of the I/O-memory range to set
1027 * @val: The value to set the memory to
1028 * @count: The number of bytes to set
1029 *
1030 * Set a range of I/O memory to a given value.
1031 */
9216efaf
TR
1032static inline void memset_io(volatile void __iomem *addr, int value,
1033 size_t size)
1034{
1035 memset(__io_virt(addr), value, size);
1036}
cd248341
JG
1037#endif
1038
1039#ifndef memcpy_fromio
9216efaf 1040#define memcpy_fromio memcpy_fromio
c2327da0
AS
1041/**
1042 * memcpy_fromio Copy a block of data from I/O memory
1043 * @dst: The (RAM) destination for the copy
1044 * @src: The (I/O memory) source for the data
1045 * @count: The number of bytes to copy
1046 *
1047 * Copy a block of data from I/O memory.
1048 */
9216efaf
TR
1049static inline void memcpy_fromio(void *buffer,
1050 const volatile void __iomem *addr,
1051 size_t size)
1052{
1053 memcpy(buffer, __io_virt(addr), size);
1054}
cd248341 1055#endif
9216efaf 1056
cd248341 1057#ifndef memcpy_toio
9216efaf 1058#define memcpy_toio memcpy_toio
c2327da0
AS
1059/**
1060 * memcpy_toio Copy a block of data into I/O memory
1061 * @dst: The (I/O memory) destination for the copy
1062 * @src: The (RAM) source for the data
1063 * @count: The number of bytes to copy
1064 *
1065 * Copy a block of data to I/O memory.
1066 */
9216efaf
TR
1067static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
1068 size_t size)
1069{
1070 memcpy(__io_virt(addr), buffer, size);
1071}
cd248341 1072#endif
3f7e212d
AB
1073
1074#endif /* __KERNEL__ */
1075
1076#endif /* __ASM_GENERIC_IO_H */