/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#include <linux/module.h>

/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access, these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif

static void bad_io_access(unsigned long port, const char *access)
{
	static int count = 10;
	if (count) {
		count--;
		WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
	}
}

/*
 * Ugly macros are a way of life.
 */
#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port >= PIO_RESERVED) {				\
		is_mmio;					\
	} else if (port > PIO_OFFSET) {				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else							\
		bad_io_access(port, #is_pio );			\
} while (0)
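
/*
 * Worked example of the cookie encoding above (illustrative only; the
 * MMIO address shown is a made-up value):
 *
 *   ioport_map(0x3f8, 8) returns the cookie (void __iomem *)0x103f8.
 *   0x103f8 is below PIO_RESERVED and above PIO_OFFSET, so IO_COND
 *   masks it with PIO_MASK back to port 0x3f8 and takes the PIO
 *   branch (inb()/outb() and friends).
 *
 *   A pointer returned by ioremap(), e.g. 0xffffc90000001000, is
 *   >= PIO_RESERVED, so IO_COND passes it unchanged to the MMIO
 *   branch (readb()/writeb() and friends).
 *
 *   Anything at or below PIO_OFFSET is neither a valid PIO cookie nor
 *   a plausible MMIO mapping and ends up in bad_io_access().
 */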

#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif

unsigned int ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
	return 0xff;
}
unsigned int ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
	return 0xffff;
}
unsigned int ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
	return 0xffff;
}
unsigned int ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
	return 0xffffffff;
}
unsigned int ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
	return 0xffffffff;
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);

#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif

void iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val,port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val,port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val,port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
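
/*
 * Example (not built; a minimal sketch only): how a driver might use the
 * byte-order-aware accessors above on a register block that has already
 * been mapped with pci_iomap()/ioport_map().  The register offsets and
 * the CTRL_ENABLE bit are hypothetical.
 */
#if 0
static void example_program_be_device(void __iomem *regs)
{
	u32 ctrl;

	/* Registers are big-endian on the bus; ioread32be() returns
	 * the value already converted to CPU byte order. */
	ctrl = ioread32be(regs + 0x00);
	ctrl |= 0x1;				/* hypothetical CTRL_ENABLE */
	iowrite32be(ctrl, regs + 0x00);

	/* Little-endian (or byte-order-free) registers use the plain
	 * ioread32()/iowrite32() variants instead. */
	iowrite32(0xdeadbeef, regs + 0x10);
}
#endif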

/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif

void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
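
/*
 * Example (not built; a minimal sketch only): draining a device FIFO with
 * the string accessors above.  The FIFO offset and the "one u32 per word"
 * layout are hypothetical.
 */
#if 0
static void example_drain_fifo(void __iomem *regs, u32 *buf, unsigned long words)
{
	/*
	 * Reads 'words' 32-bit values from the *same* location
	 * (regs + 0x40) into buf; the address is not incremented,
	 * which is exactly what a hardware FIFO port wants.
	 */
	ioread32_rep(regs + 0x40, buf, words);
}
#endif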

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
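
/*
 * Example (not built; a minimal sketch only): wrapping a legacy fixed I/O
 * port range in an iomap cookie so the same ioread*()/iowrite*() code can
 * serve both PIO and MMIO devices.  The 16550-style port base and register
 * offset are used purely for illustration.
 */
#if 0
static unsigned int example_peek_uart_lsr(void)
{
	void __iomem *base = ioport_map(0x3f8, 8);	/* request_region() assumed done elsewhere */
	unsigned int lsr;

	if (!base)
		return ~0;
	lsr = ioread8(base + 5);	/* Line Status Register at offset 5 */
	ioport_unmap(base);
	return lsr;
}
#endif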

/**
 * pci_iomap - create a virtual mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @maxlen: length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details of whether this is an MMIO or PIO address space and will
 * just do what you expect from them in the correct way.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR without checking for its length first, pass %0 here.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
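
/*
 * Example (not built; a minimal sketch only): the pci_iomap() lifecycle as
 * described in the kernel-doc above.  The BAR number, register offset and
 * STATUS semantics are hypothetical.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	/* maxlen == 0: map the whole BAR, whether it is PIO or MMIO. */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + 0x10);		/* hypothetical STATUS register */
	dev_info(&pdev->dev, "status %#x\n", status);

	pci_iounmap(pdev, regs);
	return 0;
}
#endif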