/*
 * arch/x86/mm/ioremap_32.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
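
/*
 * Worked example of the offset handling above (hypothetical numbers, not
 * taken from this file): __ioremap(0xfebc0a10, 0x20, 0) computes
 * offset = 0xa10, rounds the physical range to the single page
 * 0xfebc0000..0xfebc0fff and, assuming get_vm_area() hands back the
 * virtual base 0xe0800000, returns 0xe0800000 + 0xa10 = 0xe0800a10,
 * so the caller never sees the page-alignment fixup mentioned in the
 * NOTE above.
 */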

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */

void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
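
/*
 * Illustrative sketch (not part of this file): a minimal driver-style use
 * of ioremap_nocache()/iounmap() as described in the comment above.  The
 * FOO_* base, length and register offsets are made-up placeholders.
 */
#define FOO_MMIO_PHYS	0xfebc0000UL	/* hypothetical MMIO window */
#define FOO_MMIO_LEN	0x1000UL
#define FOO_REG_CTRL	0x00		/* hypothetical register offsets */
#define FOO_REG_STATUS	0x04

static int foo_poke_device(void)
{
	void __iomem *regs;
	u32 status;

	/* Map the device registers uncached. */
	regs = ioremap_nocache(FOO_MMIO_PHYS, FOO_MMIO_LEN);
	if (!regs)
		return -ENOMEM;

	/* Always go through the mmio accessors, never plain loads/stores. */
	writel(0x1, regs + FOO_REG_CTRL);
	status = readl(regs + FOO_REG_STATUS);

	/* Every successful ioremap_nocache() must be paired with iounmap(). */
	iounmap(regs);

	return status ? 0 : -EIO;
}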

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);


int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
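
/*
 * Worked example for the two helpers above (hypothetical address, assuming
 * the 2-level non-PAE i386 layout: 10 bits of pgd index, 10 bits of pte
 * index, 12-bit page offset):
 *
 *	addr                        = 0xffd00234
 *	(addr >> 22) & 1023         = 0x3ff	(slot in swapper_pg_dir)
 *	(addr >> PAGE_SHIFT) & 1023 = 0x100	(slot in bm_pte)
 *	addr & ~PAGE_MASK           = 0x234	(offset within the page)
 */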

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk("pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk("early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
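
/*
 * Illustrative sketch (not part of this file): how early boot code is
 * expected to pair early_ioremap() with early_iounmap() before the normal
 * ioremap machinery is available.  The table address, size and signature
 * value are made-up placeholders.
 */
static int __init foo_check_firmware_table(unsigned long table_phys)
{
	void *map;
	u32 sig;

	/* Temporary boot-time mapping; at most FIX_BTMAPS_NESTING may nest. */
	map = early_ioremap(table_phys, 64);
	if (!map)
		return -ENOMEM;

	sig = *(u32 *)map;		/* first dword of the table */

	/* Drop the mapping so the fixmap slots can be reused. */
	early_iounmap(map, 64);

	return sig == 0x12345678 ? 0 : -ENODEV;
}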

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}