]>
Commit | Line | Data |
---|---|---|
1965aae3 PA |
1 | #ifndef _ASM_X86_IO_H |
2 | #define _ASM_X86_IO_H | |
e045fb2a | 3 | |
b310f381 | 4 | #define ARCH_HAS_IOREMAP_WC |
5 | ||
c1f64a58 | 6 | #include <linux/compiler.h> |
2c5643b1 | 7 | #include <asm-generic/int-ll64.h> |
976e8f67 | 8 | #include <asm/page.h> |
c1f64a58 LT |
9 | |
/*
 * Generate a typed MMIO read accessor.
 *
 * name:    function to define (e.g. readb)
 * size:    instruction size suffix for the mov ("b", "w", "l", "q")
 * type:    C type returned to the caller
 * reg:     output register constraint ("=q" for byte loads, "=r" otherwise)
 * barrier: optional :"memory" clobber that orders the access against
 *          surrounding memory operations; empty for the relaxed variants
 */
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
14 | ||
/*
 * Generate a typed MMIO write accessor; parameters mirror
 * build_mmio_read(), with reg constraining the input operand.
 */
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
19 | ||
1c5b0eb6 MP |
20 | build_mmio_read(readb, "b", unsigned char, "=q", :"memory") |
21 | build_mmio_read(readw, "w", unsigned short, "=r", :"memory") | |
22 | build_mmio_read(readl, "l", unsigned int, "=r", :"memory") | |
c1f64a58 | 23 | |
1c5b0eb6 MP |
24 | build_mmio_read(__readb, "b", unsigned char, "=q", ) |
25 | build_mmio_read(__readw, "w", unsigned short, "=r", ) | |
26 | build_mmio_read(__readl, "l", unsigned int, "=r", ) | |
c1f64a58 LT |
27 | |
28 | build_mmio_write(writeb, "b", unsigned char, "q", :"memory") | |
29 | build_mmio_write(writew, "w", unsigned short, "r", :"memory") | |
30 | build_mmio_write(writel, "l", unsigned int, "r", :"memory") | |
31 | ||
32 | build_mmio_write(__writeb, "b", unsigned char, "q", ) | |
33 | build_mmio_write(__writew, "w", unsigned short, "r", ) | |
34 | build_mmio_write(__writel, "l", unsigned int, "r", ) | |
35 | ||
36 | #define readb_relaxed(a) __readb(a) | |
37 | #define readw_relaxed(a) __readw(a) | |
38 | #define readl_relaxed(a) __readl(a) | |
39 | #define __raw_readb __readb | |
40 | #define __raw_readw __readw | |
41 | #define __raw_readl __readl | |
42 | ||
43 | #define __raw_writeb __writeb | |
44 | #define __raw_writew __writew | |
45 | #define __raw_writel __writel | |
46 | ||
47 | #define mmiowb() barrier() | |
48 | ||
49 | #ifdef CONFIG_X86_64 | |
93093d09 | 50 | |
1c5b0eb6 | 51 | build_mmio_read(readq, "q", unsigned long, "=r", :"memory") |
c1f64a58 | 52 | build_mmio_write(writeq, "q", unsigned long, "r", :"memory") |
c1f64a58 | 53 | |
93093d09 | 54 | #else |
2c5643b1 HM |
55 | |
56 | static inline __u64 readq(const volatile void __iomem *addr) | |
57 | { | |
58 | const volatile u32 __iomem *p = addr; | |
a0b1131e | 59 | u32 low, high; |
2c5643b1 | 60 | |
a0b1131e IM |
61 | low = readl(p); |
62 | high = readl(p + 1); | |
2c5643b1 | 63 | |
a0b1131e | 64 | return low + ((u64)high << 32); |
2c5643b1 HM |
65 | } |
66 | ||
67 | static inline void writeq(__u64 val, volatile void __iomem *addr) | |
68 | { | |
69 | writel(val, addr); | |
70 | writel(val >> 32, addr+4); | |
71 | } | |
72 | ||
a0b1131e IM |
73 | #endif |
74 | ||
#define readq_relaxed(a)	readq(a)

#define __raw_readq(a)		readq(a)
#define __raw_writeq(val, addr)	writeq(val, addr)

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq
2c5643b1 | 83 | |
976e8f67 JF |
84 | /** |
85 | * virt_to_phys - map virtual addresses to physical | |
86 | * @address: address to remap | |
87 | * | |
88 | * The returned physical address is the physical (CPU) mapping for | |
89 | * the memory address given. It is only valid to use this function on | |
90 | * addresses directly mapped or allocated via kmalloc. | |
91 | * | |
92 | * This function does not give bus mappings for DMA transfers. In | |
93 | * almost all conceivable cases a device driver should not be using | |
94 | * this function | |
95 | */ | |
96 | ||
97 | static inline phys_addr_t virt_to_phys(volatile void *address) | |
98 | { | |
99 | return __pa(address); | |
100 | } | |
101 | ||
102 | /** | |
103 | * phys_to_virt - map physical address to virtual | |
104 | * @address: address to remap | |
105 | * | |
106 | * The returned virtual address is a current CPU mapping for | |
107 | * the memory address given. It is only valid to use this function on | |
108 | * addresses that have a kernel mapping | |
109 | * | |
110 | * This function does not handle bus mappings for DMA transfers. In | |
111 | * almost all conceivable cases a device driver should not be using | |
112 | * this function | |
113 | */ | |
114 | ||
115 | static inline void *phys_to_virt(phys_addr_t address) | |
116 | { | |
117 | return __va(address); | |
118 | } | |
119 | ||
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
141 | ||
96a388de TG |
142 | #ifdef CONFIG_X86_32 |
143 | # include "io_32.h" | |
144 | #else | |
145 | # include "io_64.h" | |
146 | #endif | |
e045fb2a | 147 | |
148 | extern void *xlate_dev_mem_ptr(unsigned long phys); | |
149 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | |
150 | ||
3a96ce8c | 151 | extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, |
152 | unsigned long prot_val); | |
1774a5be | 153 | extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); |
3a96ce8c | 154 | |
4583ed51 JF |
155 | /* |
156 | * early_ioremap() and early_iounmap() are for temporary early boot-time | |
157 | * mappings, before the real ioremap() is functional. | |
158 | * A boot-time mapping is currently limited to at most 16 pages. | |
159 | */ | |
160 | extern void early_ioremap_init(void); | |
4583ed51 | 161 | extern void early_ioremap_reset(void); |
1d6cf1fe HH |
162 | extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); |
163 | extern void __iomem *early_memremap(unsigned long offset, unsigned long size); | |
164 | extern void early_iounmap(void __iomem *addr, unsigned long size); | |
4583ed51 JF |
165 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); |
166 | ||
167 | ||
1965aae3 | 168 | #endif /* _ASM_X86_IO_H */ |