/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar()	rmb()
#else
#define __io_ar()	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	barrier()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par()	__io_ar()
#endif

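/*
 * Illustrative sketch only (not part of this header): an architecture that
 * needs stronger ordering than a compiler barrier can define any of the
 * hooks above in its own asm/io.h before including asm-generic/io.h, e.g.
 * for a hypothetical architecture "foo" with made-up sync instructions:
 *
 *	// arch/foo/include/asm/io.h (hypothetical)
 *	#define __io_ar()	__asm__ __volatile__ ("sync.rd" ::: "memory")
 *	#define __io_bw()	__asm__ __volatile__ ("sync.wr" ::: "memory")
 *	#include <asm-generic/io.h>
 *
 * Which barrier instruction each hook maps to is entirely architecture
 * specific; the mnemonics above are placeholders, not real instructions.
 */
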
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
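
/*
 * Illustrative sketch only (not part of this header): the __raw_*()
 * accessors above do no byte swapping and imply no barriers, while the
 * readX()/writeX() helpers below treat the register as little endian.
 * For a little-endian device register at a hypothetical offset REG_STATUS:
 *
 *	u32 raw = __raw_readl(base + REG_STATUS);	// bytes exactly as on the bus
 *	u32 val = readl(base + REG_STATUS);		// same access with le32_to_cpu() applied
 *
 * "base" and "REG_STATUS" are made-up names used only for the example.
 */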

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb
#endif

#ifndef readw_relaxed
#define readw_relaxed readw
#endif

#ifndef readl_relaxed
#define readl_relaxed readl
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb
#endif

#ifndef writew_relaxed
#define writew_relaxed writew
#endif

#ifndef writel_relaxed
#define writel_relaxed writel
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif

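/*
 * Illustrative sketch only (not part of this header): the _relaxed()
 * accessors are typically used in hot paths where the full ordering
 * guarantees are not needed, e.g. polling a status register and only
 * ordering against the read that fetches the payload:
 *
 *	while (!(readl_relaxed(base + REG_STATUS) & STATUS_DONE))
 *		cpu_relax();
 *	val = readl(base + REG_DATA);	// fully ordered read of the data
 *
 * "base", "REG_STATUS", "REG_DATA" and "STATUS_DONE" are made-up names.
 */
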
/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

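/*
 * Illustrative sketch only (not part of this header): the string accessors
 * above repeatedly access a single MMIO address, which is the usual way to
 * drain or fill a device FIFO register:
 *
 *	u32 rxbuf[16];
 *
 *	readsl(base + REG_RX_FIFO, rxbuf, ARRAY_SIZE(rxbuf));	// 16 reads of the same register
 *	writesl(base + REG_TX_FIFO, txbuf, tx_words);
 *
 * "base", "REG_RX_FIFO", "REG_TX_FIFO", "rxbuf", "txbuf" and "tx_words" are
 * made-up names used only for the example.
 */
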
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	return readb(PCI_IOBASE + addr);
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	return readw(PCI_IOBASE + addr);
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	return readl(PCI_IOBASE + addr);
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	writeb(value, PCI_IOBASE + addr);
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	writew(value, PCI_IOBASE + addr);
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	writel(value, PCI_IOBASE + addr);
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

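/*
 * Illustrative sketch only (not part of this header): in this generic
 * implementation the port-I/O helpers above are plain MMIO accesses
 * relative to PCI_IOBASE, so legacy-style port accesses such as
 *
 *	outb(ctrl_val, dev_port + REG_CTRL);
 *	status = inb(dev_port + REG_STATUS);
 *
 * end up as writeb()/readb() at PCI_IOBASE + port.  "dev_port", "ctrl_val",
 * "REG_CTRL" and "REG_STATUS" are made-up names used only for the example.
 */
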
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

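/*
 * Illustrative sketch only (not part of this header): the ioreadN()/
 * iowriteN() helpers take the cookie returned by pci_iomap(); when
 * CONFIG_GENERIC_IOMAP is enabled, the lib/iomap.c versions also accept
 * ioport_map() cookies behind the same interface:
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);	// BAR 0, whole length
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite32(ctrl_val, regs + REG_CTRL);
 *	status = ioread32(regs + REG_STATUS);
 *
 * "pdev", "ctrl_val", "REG_CTRL" and "REG_STATUS" are made-up names used
 * only for the example.
 */
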
#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an IOMMU, your architecture is expected to have both ioremap()
 * and iounmap() implemented; otherwise the asm-generic helpers will provide a
 * direct mapping.
 *
 * There are ioremap_*() call variants. If you have no IOMMU, we naturally
 * default to direct mapping for all of them; you can override these defaults.
 * If you have an IOMMU you are highly encouraged to provide your own
 * ioremap variant implementation as there currently is no safe architecture
 * agnostic default. To avoid possible improper behaviour the default
 * asm-generic ioremap_*() variants all return NULL when an IOMMU is
 * available. If you've defined your own ioremap_*() variant you must then
 * declare your own ioremap_*() variant as defined to itself to avoid the
 * default NULL return.
 */

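/*
 * Illustrative sketch only (not part of this header): a typical driver maps
 * a physical register window once, uses the __iomem cookie with the
 * accessors above, and unmaps it on teardown:
 *
 *	void __iomem *base = ioremap_nocache(res->start, resource_size(res));
 *
 *	if (!base)
 *		return -ENOMEM;
 *	...
 *	iounmap(base);
 *
 * "res" is a hypothetical struct resource describing the device's MMIO
 * region, in real drivers usually obtained from platform_get_resource() or
 * the PCI core.
 */
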
#ifdef CONFIG_MMU

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * Change "struct page" to physical address.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_nocache
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap

static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

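/*
 * Illustrative sketch only (not part of this header): ioport_map() turns a
 * legacy port number into an __iomem cookie so that port I/O can be driven
 * through the ioreadN()/iowriteN() interface:
 *
 *	void __iomem *port = ioport_map(io_base, 8);	// map 8 consecutive ports
 *
 *	if (!port)
 *		return -ENOMEM;
 *	iowrite8(ctrl_val, port + REG_CTRL);
 *	ioport_unmap(port);
 *
 * The generic inline above never returns NULL, but the CONFIG_GENERIC_IOMAP
 * version can.  "io_base", "ctrl_val" and "REG_CTRL" are made-up names used
 * only for the example.
 */
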
/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io	Set a range of I/O memory to a constant value
 * @addr:	The beginning of the I/O-memory range to set
 * @value:	The value to set the memory to
 * @size:	The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio	Copy a block of data from I/O memory
 * @buffer:	The (RAM) destination for the copy
 * @addr:	The (I/O memory) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio	Copy a block of data into I/O memory
 * @addr:	The (I/O memory) destination for the copy
 * @buffer:	The (RAM) source for the data
 * @size:	The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
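
/*
 * Illustrative sketch only (not part of this header): memset_io() and
 * memcpy_{from,to}io() are the MMIO-safe replacements for memset() and
 * memcpy() on __iomem regions, e.g. when loading device firmware into, or
 * clearing, a buffer that lives in device memory:
 *
 *	memset_io(shmem, 0, SHMEM_SIZE);
 *	memcpy_toio(shmem, fw->data, fw->size);
 *
 * "shmem" and "SHMEM_SIZE" are made-up names; "fw" stands in for a struct
 * firmware obtained from request_firmware().
 */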

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */