lib/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

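/*
 * Per page-table-level flags recording whether the architecture can back
 * I/O mappings with huge pages.  Without CONFIG_HAVE_ARCH_HUGE_VMAP the
 * ioremap_*_enabled() helpers below are constant 0 and the mapping code
 * only ever installs PTE-sized entries.
 */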
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

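/* "nohugeiomap" on the kernel command line disables huge I/O mappings. */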
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

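/*
 * Cache which huge mapping sizes the architecture supports, unless huge
 * I/O mappings were disabled on the command line.  Called once during
 * early boot.
 */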
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

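/*
 * Lowest level of the walk: fill the PTEs covering [addr, end) with
 * PAGE_SIZE mappings of the physical range starting at phys_addr.
 */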
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

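/*
 * PMD level: install a huge PMD when the range covers a whole PMD_SIZE
 * block and the physical address is suitably aligned; otherwise fall
 * back to ioremap_pte_range().  phys_addr is rebased by -addr so that
 * phys_addr + addr is the physical address mapped at each virtual addr.
 */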
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

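/*
 * PUD level: same pattern one level up, using pud_set_huge() for
 * PUD_SIZE-aligned chunks and descending to ioremap_pmd_range() for
 * everything else.
 */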
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

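/*
 * P4D level: the p4d_set_huge() path is present but effectively unused,
 * since ioremap_huge_init() never sets ioremap_p4d_capable; in practice
 * this always descends to ioremap_pud_range().
 */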
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;

	phys_addr -= addr;
	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_p4d_enabled() &&
		    ((next - addr) == P4D_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
			if (p4d_set_huge(p4d, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

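/*
 * ioremap_page_range - map the physical range starting at phys_addr into
 * the kernel page tables at [addr, end) with the given protection bits.
 * Installs the largest entries the architecture allows and returns 0 on
 * success or -ENOMEM if a page-table page could not be allocated.
 * Callers are expected to have reserved the virtual range (e.g. via
 * get_vm_area()) beforehand.
 */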
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
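
/*
 * --- Illustrative usage (not part of this file) ---
 *
 * A minimal sketch of how an architecture's ioremap() typically drives
 * ioremap_page_range(): reserve a VM_IOREMAP area in vmalloc space, then
 * populate it, undoing the reservation on failure.  The function name
 * example_ioremap_prot() is hypothetical, and the resource and caching
 * checks done by real implementations are omitted.
 */
void __iomem *example_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Reject zero-sized or wrapping requests. */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Mappings must be page aligned; remember the sub-page offset. */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	/* Reserve a virtual range in the vmalloc/ioremap area. */
	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* Populate the page tables; tear the area down on failure. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}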