/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

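/*
 * Translate a kernel virtual address back to its physical address,
 * handling both the kernel image mapping (above __START_KERNEL_map)
 * and the direct mapping (above PAGE_OFFSET).
 */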
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

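/*
 * Check whether a page frame is usable RAM according to the E820 map,
 * treating the BIOS-owned first page and the legacy BIOS area between
 * 640k and 1Mb as not-RAM regardless of what the firmware reports.
 */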
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
             (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

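/**
 * ioremap_cache - map bus memory into CPU space, write-back cached
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap requests a write-back cacheable mapping.
 *
 * Must be freed with iounmap.
 */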
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

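/*
 * Undo xlate_dev_mem_ptr(): RAM pages were returned via __va() and need
 * no teardown, anything else was ioremap()ed and must be iounmap()ed.
 */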
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

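/*
 * Hook the statically allocated bm_pte page into the pmd that covers
 * the boot-time fixmap (FIX_BTMAP) range, so early_ioremap() can be
 * used before the normal page allocator and ioremap() are available.
 */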
void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

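/*
 * Tear down the boot-time ioremap page table: unhook bm_pte from its
 * pmd and flush the TLB before the permanent mappings take over.
 */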
void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

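/*
 * After paging_init(), re-establish any boot-time fixmap entries that
 * are still present as regular fixmap mappings and switch the helpers
 * below over to set_fixmap()/clear_fixmap().
 */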
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

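/*
 * Late sanity check: warn if any early_ioremap() area was never
 * released with early_iounmap() by the time initcalls run.
 */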
static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

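/*
 * Map a physical range into the boot-time fixmap slots so it can be
 * accessed before ioremap() is usable. At most FIX_BTMAPS_NESTING
 * mappings of up to NR_FIX_BTMAPS pages each may be live at once;
 * release them with early_iounmap().
 */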
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

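/*
 * Undo an early_ioremap(): clear the boot-time fixmap slots that back
 * the mapping and drop one level of nesting.
 */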
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */