git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - arch/x86/mm/ioremap_32.c
x86: make c_p_a unconditional in ioremap
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                        unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (void __iomem *) phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr <= virt_to_phys(high_memory - 1)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr);
                     page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long) addr,
                               (unsigned long) addr + size, phys_addr, prot)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
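
/*
 * Illustrative sketch (not part of the original file): the NOTE above says
 * callers may pass a non-page-aligned physical address; __ioremap()
 * page-aligns it internally and folds the sub-page offset back into the
 * returned cookie.  The address, length and the example_* name below are
 * hypothetical.
 */
#if 0
static void __iomem *example_map_unaligned_region(void)
{
        /* 0xfebf00c0 is not page-aligned; the 0xc0 offset is preserved */
        return __ioremap(0xfebf00c0, 0x40, 0);
}
#endif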

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (last_addr < virt_to_phys(high_memory) - 1) {
                struct page *ppage = virt_to_page(__va(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                 * the page-aligned "last - first" come out right.
                 */
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
EXPORT_SYMBOL(ioremap_nocache);
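
/*
 * Illustrative sketch (not part of the original file): typical use of
 * ioremap_nocache() for a device register window, as described in the
 * comment above.  The physical address, window size, register offsets and
 * the example_* name are hypothetical; readl()/writel() are the normal
 * MMIO accessors from <linux/io.h>.
 */
#if 0
static int example_enable_device(void)
{
        void __iomem *regs = ioremap_nocache(0xfebf0000, 0x1000);
        u32 id;

        if (!regs)
                return -ENOMEM;

        id = readl(regs + 0x00);        /* hypothetical ID register     */
        writel(0x1, regs + 0x04);       /* hypothetical enable register */

        iounmap(regs);
        return id ? 0 : -ENODEV;
}
#endif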

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        if (p->phys_addr < virt_to_phys(high_memory) - 1) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 get_vm_area_size(p) >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
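
/*
 * Illustrative sketch (not part of the original file): the comment above
 * requires exactly one iounmap() per mapping.  A hypothetical driver can
 * enforce that by clearing its cached cookie after unmapping.
 */
#if 0
struct example_device {
        void __iomem *regs;     /* set up earlier via ioremap_nocache() */
};

static void example_teardown(struct example_device *dev)
{
        if (dev->regs) {
                iounmap(dev->regs);
                dev->regs = NULL;       /* guard against a second iounmap() */
        }
}
#endif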

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
                                __attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
        return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
        return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

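/*
 * The arithmetic in the two helpers above corresponds to the classic
 * non-PAE two-level i386 layout: a linear address is split into a 10-bit
 * page-directory index (bits 31..22), a 10-bit page-table index
 * (bits 21..12) and a 12-bit page offset.  For example, addr == 0xffc00000
 * gives (addr >> 22) & 1023 == 1023 and (addr >> PAGE_SHIFT) & 1023 == 0,
 * i.e. the last pgd slot and the first entry of the bm_pte[] page table
 * that backs the boot-time fixmap.
 */
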
void __init early_ioremap_init(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_init()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = __pa(bm_pte) | _PAGE_TABLE;
        memset(bm_pte, 0, sizeof(bm_pte));
        /*
         * The boot-ioremap range spans multiple pgds, for which
         * we are not prepared:
         */
        if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pgd %p != %p\n",
                       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_clear()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = 0;
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long *pte, phys, addr;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                /* Re-establish only the entries that are actually present */
                if (*pte & _PAGE_PRESENT) {
                        phys = *pte & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long *pte, addr = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                *pte = (phys & PAGE_MASK) | pgprot_val(flags);
        else
                *pte = 0;
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
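
/*
 * Illustrative sketch (not part of the original file): early_ioremap()
 * gives boot code a temporary window onto physical memory before the
 * normal ioremap()/vmalloc machinery is up; every mapping must be released
 * with early_iounmap(), and at most FIX_BTMAPS_NESTING mappings may be
 * live at once.  The physical address, length and the example_* name are
 * hypothetical.
 */
#if 0
static void __init example_peek_firmware_table(void)
{
        void *table = early_ioremap(0x000f5a00, 64);

        if (table) {
                /* ... parse the hypothetical firmware table here ... */
                early_iounmap(table, 64);
        }
}
#endif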

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}