/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

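/*
 * Illustrative sketch (not part of the original file): __phys_addr() is the
 * 64-bit backend of __pa().  An address inside the kernel image, i.e. at or
 * above __START_KERNEL_map, translates relative to phys_base; anything else
 * is assumed to live in the direct mapping and translates relative to
 * PAGE_OFFSET.  For example (kmalloc_buf is a hypothetical pointer):
 *
 *	phys_text = __pa(_text);	<- kernel image branch
 *	phys_buf  = __pa(kmalloc_buf);	<- direct-mapping branch
 */
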
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

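/*
 * Usage sketch (illustrative, not from the original file): callers use
 * page_is_ram() to decide whether a page frame is usable RAM according to
 * the e820 map.  The helper and pfn range below are hypothetical.
 */
#if 0
static int __init example_range_is_ram(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	unsigned long pfn;

	/* Return 1 only if every page frame in [start_pfn, end_pfn) is RAM. */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (!page_is_ram(pfn))
			return 0;
	return 1;
}
#endif
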
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
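
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * maps its device's MMIO window with ioremap_nocache(), talks to it through
 * the mmio helpers, and releases it with iounmap().  The bus address, window
 * size and register offsets below are hypothetical.
 */
#if 0
static int example_probe_device(void)
{
	void __iomem *regs;
	u32 status;

	/* hypothetical 4 KiB register window at bus address 0xfebf0000 */
	regs = ioremap_nocache(0xfebf0000UL, 0x1000);
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);		/* hypothetical STATUS register */
	writel(status | 0x1, regs + 0x04);	/* hypothetical enable bit */

	iounmap(regs);
	return 0;
}
#endif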

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
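
/*
 * Note (illustrative, not part of the original file): because __ioremap()
 * returns a plain direct-mapping alias for the low PCI/ISA window instead of
 * creating a vm_area, iounmap() deliberately returns early for such
 * addresses.  For example, __ioremap(0xb8000, 0x1000, 0) for the VGA text
 * buffer simply yields phys_to_virt(0xb8000), and the matching iounmap()
 * call is a no-op.
 */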

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
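
/*
 * Usage sketch (illustrative, not part of the original file): early_ioremap()
 * hands out temporary FIX_BTMAP fixmap slots before the normal ioremap()
 * machinery is available, nested at most FIX_BTMAPS_NESTING deep, and every
 * mapping must be balanced by early_iounmap() or check_early_ioremap_leak()
 * will warn at boot.  The physical address and length below are hypothetical.
 */
#if 0
static void __init example_read_firmware_table(void)
{
	void *p;

	p = early_ioremap(0x000f0000UL, 64);	/* hypothetical table location */
	if (p) {
		/* ... parse up to 64 bytes of firmware data here ... */
		early_iounmap(p, 64);		/* must balance early_ioremap() */
	}
}
#endif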

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */