// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
1da177e4 | 15 | |
1da177e4 LT |
16 | /* |
17 | * Generic mapping function (not visible outside): | |
18 | */ | |
19 | ||
20 | /* | |
21 | * Remap an arbitrary physical address space into the kernel virtual | |
e0565a1c | 22 | * address space. |
1da177e4 LT |
23 | * |
24 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | |
25 | * have to convert them into an offset in a page-aligned mapping, but the | |
26 | * caller shouldn't need to know that small detail. | |
27 | */ | |
28 | void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) | |
29 | { | |
e51ec241 | 30 | void __iomem *addr; |
cb4ab59c HD |
31 | struct vm_struct *area; |
32 | unsigned long offset, last_addr; | |
e34067fd | 33 | pgprot_t pgprot; |
cb4ab59c | 34 | |
29ef8295 | 35 | #ifdef CONFIG_EISA |
1da177e4 LT |
36 | unsigned long end = phys_addr + size - 1; |
37 | /* Support EISA addresses */ | |
10267cdd HD |
38 | if ((phys_addr >= 0x00080000 && end < 0x000fffff) || |
39 | (phys_addr >= 0x00500000 && end < 0x03bfffff)) { | |
40 | phys_addr |= F_EXTEND(0xfc000000); | |
b2d6b9fb | 41 | flags |= _PAGE_NO_CACHE; |
1da177e4 | 42 | } |
1da177e4 LT |
43 | #endif |
44 | ||
1da177e4 LT |
45 | /* Don't allow wraparound or zero size */ |
46 | last_addr = phys_addr + size - 1; | |
47 | if (!size || last_addr < phys_addr) | |
48 | return NULL; | |
49 | ||
50 | /* | |
51 | * Don't allow anybody to remap normal RAM that we're using.. | |
52 | */ | |
53 | if (phys_addr < virt_to_phys(high_memory)) { | |
54 | char *t_addr, *t_end; | |
55 | struct page *page; | |
56 | ||
57 | t_addr = __va(phys_addr); | |
58 | t_end = t_addr + (size - 1); | |
59 | ||
e0565a1c KM |
60 | for (page = virt_to_page(t_addr); |
61 | page <= virt_to_page(t_end); page++) { | |
1da177e4 LT |
62 | if(!PageReserved(page)) |
63 | return NULL; | |
e0565a1c | 64 | } |
1da177e4 LT |
65 | } |
66 | ||
e34067fd HS |
67 | pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | |
68 | _PAGE_ACCESSED | flags); | |
69 | ||
1da177e4 LT |
70 | /* |
71 | * Mappings have to be page-aligned | |
72 | */ | |
73 | offset = phys_addr & ~PAGE_MASK; | |
74 | phys_addr &= PAGE_MASK; | |
a292dfa0 | 75 | size = PAGE_ALIGN(last_addr + 1) - phys_addr; |
1da177e4 LT |
76 | |
77 | /* | |
78 | * Ok, go for it.. | |
79 | */ | |
80 | area = get_vm_area(size, VM_IOREMAP); | |
81 | if (!area) | |
82 | return NULL; | |
e0565a1c | 83 | |
e51ec241 | 84 | addr = (void __iomem *) area->addr; |
e34067fd HS |
85 | if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, |
86 | phys_addr, pgprot)) { | |
1da177e4 LT |
87 | vfree(addr); |
88 | return NULL; | |
89 | } | |
e0565a1c | 90 | |
e51ec241 | 91 | return (void __iomem *) (offset + (char __iomem *)addr); |
1da177e4 | 92 | } |
d345fd36 | 93 | EXPORT_SYMBOL(__ioremap); |
1da177e4 | 94 | |
01232e93 | 95 | void iounmap(const volatile void __iomem *addr) |
1da177e4 | 96 | { |
1da177e4 LT |
97 | if (addr > high_memory) |
98 | return vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); | |
1da177e4 | 99 | } |
d345fd36 | 100 | EXPORT_SYMBOL(iounmap); |