]>
Commit | Line | Data |
---|---|---|
b4d0d230 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
739d875d | 2 | /* Generic I/O port emulation. |
3f7e212d AB |
3 | * |
4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
3f7e212d AB |
6 | */ |
7 | #ifndef __ASM_GENERIC_IO_H | |
8 | #define __ASM_GENERIC_IO_H | |
9 | ||
10 | #include <asm/page.h> /* I/O is all done through memory accesses */ | |
9216efaf | 11 | #include <linux/string.h> /* for memset() and memcpy() */ |
3f7e212d AB |
12 | #include <linux/types.h> |
13 | ||
14 | #ifdef CONFIG_GENERIC_IOMAP | |
15 | #include <asm-generic/iomap.h> | |
16 | #endif | |
17 | ||
60ca1e5a | 18 | #include <asm/mmiowb.h> |
66eab4df MT |
19 | #include <asm-generic/pci_iomap.h> |
20 | ||
64e2c673 SK |
21 | #ifndef __io_br |
22 | #define __io_br() barrier() | |
23 | #endif | |
24 | ||
25 | /* prevent prefetching of coherent DMA data ahead of a dma-complete */ | |
26 | #ifndef __io_ar | |
27 | #ifdef rmb | |
abbbbc83 | 28 | #define __io_ar(v) rmb() |
64e2c673 | 29 | #else |
abbbbc83 | 30 | #define __io_ar(v) barrier() |
64e2c673 SK |
31 | #endif |
32 | #endif | |
33 | ||
34 | /* flush writes to coherent DMA data before possibly triggering a DMA read */ | |
35 | #ifndef __io_bw | |
36 | #ifdef wmb | |
37 | #define __io_bw() wmb() | |
38 | #else | |
39 | #define __io_bw() barrier() | |
40 | #endif | |
41 | #endif | |
42 | ||
43 | /* serialize device access against a spin_unlock, usually handled there. */ | |
44 | #ifndef __io_aw | |
60ca1e5a | 45 | #define __io_aw() mmiowb_set_pending() |
64e2c673 SK |
46 | #endif |
47 | ||
48 | #ifndef __io_pbw | |
49 | #define __io_pbw() __io_bw() | |
50 | #endif | |
51 | ||
52 | #ifndef __io_paw | |
53 | #define __io_paw() __io_aw() | |
54 | #endif | |
55 | ||
56 | #ifndef __io_pbr | |
57 | #define __io_pbr() __io_br() | |
58 | #endif | |
59 | ||
60 | #ifndef __io_par | |
abbbbc83 | 61 | #define __io_par(v) __io_ar(v) |
64e2c673 SK |
62 | #endif |
63 | ||
64 | ||
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 *
 * No barriers and no byte swapping are applied here; these are the raw
 * building blocks for the ordered accessors further down.  Each one is
 * #define'd to its own name so architectures can detect an override.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
        return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
        return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
        return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
        return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
        *(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
        *(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
        *(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
        *(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
cd248341 | 140 | |
/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 *
 * Each read is bracketed by __io_br()/__io_ar() so that it is ordered
 * against surrounding memory accesses (e.g. DMA completion flags).
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
        u8 val;

        __io_br();
        val = __raw_readb(addr);
        __io_ar(val);
        return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
        u16 val;

        __io_br();
        val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
        __io_ar(val);
        return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
        u32 val;

        __io_br();
        val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
        __io_ar(val);
        return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
        u64 val;

        __io_br();
        val = __le64_to_cpu(__raw_readq(addr));
        __io_ar(val);
        return val;
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d | 199 | |
/*
 * write{b,w,l,q}(): store a native-endian value to little-endian MMIO.
 * Each write is bracketed by __io_bw()/__io_aw() for ordering against
 * prior memory accesses and subsequent spin_unlock() respectively.
 */
#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writeb(value, addr);
        __io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writew((u16 __force)cpu_to_le16(value), addr);
        __io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writel((u32 __force)__cpu_to_le32(value), addr);
        __io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
        __io_bw();
        __raw_writeq(__cpu_to_le64(value), addr);
        __io_aw();
}
#endif
#endif /* CONFIG_64BIT */
3f7e212d | 241 | |
/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 *
 * They only perform the endianness conversion, with no __io_* barriers.
 * The 64-bit variants are only provided when readq/writeq themselves
 * exist (i.e. on CONFIG_64BIT or via an architecture override).
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
        return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
        return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
        return __le32_to_cpu(__raw_readl(addr));
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
        return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
        __raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
        __raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
        __raw_writel(__cpu_to_le32(value), addr);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
        __raw_writeq(__cpu_to_le64(value), addr);
}
#endif
310 | ||
9ab3a7a0 TR |
311 | /* |
312 | * {read,write}s{b,w,l,q}() repeatedly access the same memory address in | |
313 | * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). | |
314 | */ | |
315 | #ifndef readsb | |
316 | #define readsb readsb | |
317 | static inline void readsb(const volatile void __iomem *addr, void *buffer, | |
318 | unsigned int count) | |
3f7e212d AB |
319 | { |
320 | if (count) { | |
321 | u8 *buf = buffer; | |
9ab3a7a0 | 322 | |
3f7e212d | 323 | do { |
9ab3a7a0 | 324 | u8 x = __raw_readb(addr); |
3f7e212d AB |
325 | *buf++ = x; |
326 | } while (--count); | |
327 | } | |
328 | } | |
35dbc0e0 | 329 | #endif |
3f7e212d | 330 | |
9ab3a7a0 TR |
331 | #ifndef readsw |
332 | #define readsw readsw | |
333 | static inline void readsw(const volatile void __iomem *addr, void *buffer, | |
334 | unsigned int count) | |
3f7e212d AB |
335 | { |
336 | if (count) { | |
337 | u16 *buf = buffer; | |
9ab3a7a0 | 338 | |
3f7e212d | 339 | do { |
9ab3a7a0 | 340 | u16 x = __raw_readw(addr); |
3f7e212d AB |
341 | *buf++ = x; |
342 | } while (--count); | |
343 | } | |
344 | } | |
35dbc0e0 | 345 | #endif |
3f7e212d | 346 | |
9ab3a7a0 TR |
347 | #ifndef readsl |
348 | #define readsl readsl | |
349 | static inline void readsl(const volatile void __iomem *addr, void *buffer, | |
350 | unsigned int count) | |
3f7e212d AB |
351 | { |
352 | if (count) { | |
353 | u32 *buf = buffer; | |
9ab3a7a0 | 354 | |
3f7e212d | 355 | do { |
9ab3a7a0 | 356 | u32 x = __raw_readl(addr); |
3f7e212d AB |
357 | *buf++ = x; |
358 | } while (--count); | |
359 | } | |
360 | } | |
35dbc0e0 | 361 | #endif |
3f7e212d | 362 | |
9ab3a7a0 TR |
363 | #ifdef CONFIG_64BIT |
364 | #ifndef readsq | |
365 | #define readsq readsq | |
366 | static inline void readsq(const volatile void __iomem *addr, void *buffer, | |
367 | unsigned int count) | |
368 | { | |
369 | if (count) { | |
370 | u64 *buf = buffer; | |
371 | ||
372 | do { | |
373 | u64 x = __raw_readq(addr); | |
374 | *buf++ = x; | |
375 | } while (--count); | |
376 | } | |
377 | } | |
378 | #endif | |
379 | #endif /* CONFIG_64BIT */ | |
380 | ||
381 | #ifndef writesb | |
382 | #define writesb writesb | |
383 | static inline void writesb(volatile void __iomem *addr, const void *buffer, | |
384 | unsigned int count) | |
3f7e212d AB |
385 | { |
386 | if (count) { | |
387 | const u8 *buf = buffer; | |
9ab3a7a0 | 388 | |
3f7e212d | 389 | do { |
9ab3a7a0 | 390 | __raw_writeb(*buf++, addr); |
3f7e212d AB |
391 | } while (--count); |
392 | } | |
393 | } | |
35dbc0e0 | 394 | #endif |
3f7e212d | 395 | |
9ab3a7a0 TR |
396 | #ifndef writesw |
397 | #define writesw writesw | |
398 | static inline void writesw(volatile void __iomem *addr, const void *buffer, | |
399 | unsigned int count) | |
3f7e212d AB |
400 | { |
401 | if (count) { | |
402 | const u16 *buf = buffer; | |
9ab3a7a0 | 403 | |
3f7e212d | 404 | do { |
9ab3a7a0 | 405 | __raw_writew(*buf++, addr); |
3f7e212d AB |
406 | } while (--count); |
407 | } | |
408 | } | |
35dbc0e0 | 409 | #endif |
3f7e212d | 410 | |
9ab3a7a0 TR |
411 | #ifndef writesl |
412 | #define writesl writesl | |
413 | static inline void writesl(volatile void __iomem *addr, const void *buffer, | |
414 | unsigned int count) | |
3f7e212d AB |
415 | { |
416 | if (count) { | |
417 | const u32 *buf = buffer; | |
9ab3a7a0 | 418 | |
3f7e212d | 419 | do { |
9ab3a7a0 | 420 | __raw_writel(*buf++, addr); |
3f7e212d AB |
421 | } while (--count); |
422 | } | |
423 | } | |
35dbc0e0 | 424 | #endif |
3f7e212d | 425 | |
9ab3a7a0 TR |
426 | #ifdef CONFIG_64BIT |
427 | #ifndef writesq | |
428 | #define writesq writesq | |
429 | static inline void writesq(volatile void __iomem *addr, const void *buffer, | |
430 | unsigned int count) | |
431 | { | |
432 | if (count) { | |
433 | const u64 *buf = buffer; | |
434 | ||
435 | do { | |
436 | __raw_writeq(*buf++, addr); | |
437 | } while (--count); | |
438 | } | |
439 | } | |
440 | #endif | |
441 | #endif /* CONFIG_64BIT */ | |
3f7e212d | 442 | |
/* Base virtual address of the PCI I/O-port window; arch may override. */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

/* Highest valid I/O port number (default: the classic 64K x86 space). */
#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif
3f7e212d | 450 | |
/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 *
 * The default _inX()/_outX() map port numbers onto the memory window at
 * PCI_IOBASE, using the PIO barrier hooks (__io_pbr/__io_par/__io_pbw/
 * __io_paw) around each access.  The underscore-prefixed names exist so
 * <linux/logic_pio.h> can wrap them; the plain names alias them below.
 */

#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
        u8 val;

        __io_pbr();
        val = __raw_readb(PCI_IOBASE + addr);
        __io_par(val);
        return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
        u16 val;

        __io_pbr();
        val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
        __io_par(val);
        return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
        u32 val;

        __io_pbr();
        val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
        __io_par(val);
        return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
        __io_pbw();
        __raw_writeb(value, PCI_IOBASE + addr);
        __io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
        __io_pbw();
        __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
        __io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
        __io_pbw();
        __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
        __io_paw();
}
#endif

/* Included here, after _inb() and friends, so it may redirect them. */
#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif
551 | ||
/*
 * "Pausing" port accessors.  By default they add no delay and simply
 * forward to the plain in/out versions; architectures whose hardware
 * needs a post-access delay override them.
 */
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
        return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
        return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
        return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
        outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
        outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
        outl(value, addr);
}
#endif
599 | ||
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 *
 * Implemented on top of reads*()/writes*() against the PCI_IOBASE window;
 * the _p versions default to the non-pausing ones.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
        readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
        readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
        readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
                         unsigned int count)
{
        writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
        insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
        insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
        insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
                           unsigned int count)
{
        outsl(addr, buffer, count);
}
#endif
706 | ||
#ifndef CONFIG_GENERIC_IOMAP
/*
 * Fallback ioread/iowrite: without GENERIC_IOMAP, iomap cookies are plain
 * MMIO pointers, so these simply forward to readX()/writeX().
 */
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
        return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
        return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
        return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
        return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
        writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
        writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
        writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
        writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * Big-endian variants: implemented by byte-swapping the result of (or the
 * value given to) the little-endian accessors.
 */
#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
        return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
        return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
        return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
{
        writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
        writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
        writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
827 | ||
/*
 * ioreadX_rep()/iowriteX_rep(): repeated access to one iomap address,
 * transferring @count chunks to/from @buffer via reads*()/writes*().
 */
#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
                               unsigned int count)
{
        readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
                                void *buffer, unsigned int count)
{
        readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
                                const void *buffer,
                                unsigned int count)
{
        writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
                                 const void *buffer,
                                 unsigned int count)
{
        writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
908 | ||
#ifdef __KERNEL__

#include <linux/vmalloc.h>
/* Strip the __iomem address-space annotation to get a plain kernel pointer. */
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 *
 * Only valid for the linearly-mapped kernel range (they wrap __pa/__va);
 * architectures with more complex mappings override them.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}
#endif
3f7e212d | 933 | |
8c7ea50c LR |
934 | /** |
935 | * DOC: ioremap() and ioremap_*() variants | |
936 | * | |
97c9801a | 937 | * Architectures with an MMU are expected to provide ioremap() and iounmap() |
80b0ca98 CH |
938 | * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide |
939 | * a default nop-op implementation that expect that the physical address used | |
940 | * for MMIO are already marked as uncached, and can be used as kernel virtual | |
941 | * addresses. | |
8c7ea50c | 942 | * |
97c9801a CH |
943 | * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes |
944 | * for specific drivers if the architecture choses to implement them. If they | |
7c566bb5 HM |
945 | * are not implemented we fall back to plain ioremap. Conversely, ioremap_np() |
946 | * can provide stricter non-posted write semantics if the architecture | |
947 | * implements them. | |
8c7ea50c | 948 | */ |
e9713395 | 949 | #ifndef CONFIG_MMU |
9216efaf TR |
950 | #ifndef ioremap |
951 | #define ioremap ioremap | |
952 | static inline void __iomem *ioremap(phys_addr_t offset, size_t size) | |
3f7e212d | 953 | { |
9216efaf | 954 | return (void __iomem *)(unsigned long)offset; |
3f7e212d | 955 | } |
9216efaf | 956 | #endif |
3f7e212d | 957 | |
b3ada9d0 GH |
958 | #ifndef iounmap |
959 | #define iounmap iounmap | |
b3ada9d0 GH |
960 | static inline void iounmap(void __iomem *addr) |
961 | { | |
962 | } | |
963 | #endif | |
80b0ca98 | 964 | #elif defined(CONFIG_GENERIC_IOREMAP) |
ca5999fd | 965 | #include <linux/pgtable.h> |
80b0ca98 CH |
966 | |
967 | void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); | |
968 | void iounmap(volatile void __iomem *addr); | |
969 | ||
970 | static inline void __iomem *ioremap(phys_addr_t addr, size_t size) | |
971 | { | |
972 | /* _PAGE_IOREMAP needs to be supplied by the architecture */ | |
973 | return ioremap_prot(addr, size, _PAGE_IOREMAP); | |
974 | } | |
975 | #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ | |
97c9801a | 976 | |
3f7e212d | 977 | #ifndef ioremap_wc |
d092a870 | 978 | #define ioremap_wc ioremap |
3f7e212d AB |
979 | #endif |
980 | ||
d838270e | 981 | #ifndef ioremap_wt |
d092a870 | 982 | #define ioremap_wt ioremap |
d838270e TK |
983 | #endif |
984 | ||
e9713395 CH |
/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation.  In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default.  Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	/* no architecture implementation: signal failure to the caller */
	return NULL;
}
#endif
7c566bb5 HM |
999 | |
/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap().  Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus.  Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	/* no architecture support: callers must fall back to ioremap() */
	return NULL;
}
#endif
1016 | ||
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
/*
 * Map a legacy I/O port range into the virtual window at PCI_IOBASE.
 * Ports above MMIO_UPPER_LIMIT cannot be represented in the window and
 * yield NULL.
 */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define __pci_ioport_unmap __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p)
{
	uintptr_t start = (uintptr_t) PCI_IOBASE;
	uintptr_t addr = (uintptr_t) p;

	/*
	 * Pointers inside the PCI_IOBASE window came from ioport_map()
	 * above and were never ioremap()ed, so only iounmap() addresses
	 * falling outside that window.
	 */
	if (addr >= start && addr < start + IO_SPACE_LIMIT)
		return;
	iounmap(p);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
	/* ioport_map() allocated nothing, so this is a no-op */
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
3f7e212d | 1049 | |
f5810e5c LP |
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

/* no-op fallback when ioport_map() did not define a window-aware helper */
#ifndef __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p) {}
#endif

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
	__pci_ioport_unmap(p);
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
1066 | ||
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
/*
 * Translate a physical address to a kernel virtual address; the generic
 * version assumes the address lies in the kernel's linear mapping.
 */
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	/* nothing to undo for the generic linear-map translation */
}
#endif
3f7e212d | 1081 | |
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
/*
 * Identity virtual<->bus translation: only valid on platforms where bus
 * addresses equal kernel virtual addresses (no IOMMU, no offset).
 */
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif
3f7e212d | 1095 | |
#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif
1112 | ||
#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif
9216efaf | 1130 | |
#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
3f7e212d | 1147 | |
#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
/*
 * Architecture-provided hook; presumably gates per-pfn access for
 * /dev/mem-style users — confirm against callers in drivers/char/mem.c.
 */
extern int devmem_is_allowed(unsigned long pfn);
#endif

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */