]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/i386/mm/boot_ioremap.c | |
3 | * | |
4 | * Re-map functions for early boot-time before paging_init() when the | |
5 | * boot-time pagetables are still in use | |
6 | * | |
7 | * Written by Dave Hansen <haveblue@us.ibm.com> | |
8 | */ | |
9 | ||
10 | ||
11 | /* | |
12 | * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE | |
27b46d76 | 13 | * keeps that from happening. If anyone has a better way, I'm listening. |
1da177e4 LT |
14 | * |
15 | * boot_pte_t is defined only if this all works correctly | |
16 | */ | |
17 | ||
1da177e4 | 18 | #undef CONFIG_X86_PAE |
da181a8b | 19 | #undef CONFIG_PARAVIRT |
1da177e4 LT |
20 | #include <asm/page.h> |
21 | #include <asm/pgtable.h> | |
22 | #include <asm/tlbflush.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/stddef.h> | |
25 | ||
26 | /* | |
27 | * I'm cheating here. It is known that the two boot PTE pages are | |
28 | * allocated next to each other. I'm pretending that they're just | |
29 | * one big array. | |
30 | */ | |
31 | ||
32 | #define BOOT_PTE_PTRS (PTRS_PER_PTE*2) | |
24fd425e | 33 | |
34 | static unsigned long boot_pte_index(unsigned long vaddr) | |
35 | { | |
36 | return __pa(vaddr) >> PAGE_SHIFT; | |
37 | } | |
1da177e4 LT |
38 | |
39 | static inline boot_pte_t* boot_vaddr_to_pte(void *address) | |
40 | { | |
41 | boot_pte_t* boot_pg = (boot_pte_t*)pg0; | |
42 | return &boot_pg[boot_pte_index((unsigned long)address)]; | |
43 | } | |
44 | ||
45 | /* | |
46 | * This is only for a caller who is clever enough to page-align | |
47 | * phys_addr and virtual_source, and who also has a preference | |
48 | * about which virtual address from which to steal ptes | |
49 | */ | |
50 | static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, | |
51 | void* virtual_source) | |
52 | { | |
53 | boot_pte_t* pte; | |
54 | int i; | |
55 | char *vaddr = virtual_source; | |
56 | ||
57 | pte = boot_vaddr_to_pte(virtual_source); | |
58 | for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) { | |
59 | set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL)); | |
60 | __flush_tlb_one(&vaddr[i*PAGE_SIZE]); | |
61 | } | |
62 | } | |
63 | ||
64 | /* the virtual space we're going to remap comes from this array */ | |
65 | #define BOOT_IOREMAP_PAGES 4 | |
66 | #define BOOT_IOREMAP_SIZE (BOOT_IOREMAP_PAGES*PAGE_SIZE) | |
67 | static __initdata char boot_ioremap_space[BOOT_IOREMAP_SIZE] | |
68 | __attribute__ ((aligned (PAGE_SIZE))); | |
69 | ||
70 | /* | |
71 | * This only applies to things which need to ioremap before paging_init() | |
72 | * bt_ioremap() and plain ioremap() are both useless at this point. | |
73 | * | |
74 | * When used, we're still using the boot-time pagetables, which only | |
75 | * have 2 PTE pages mapping the first 8MB | |
76 | * | |
77 | * There is no unmap. The boot-time PTE pages aren't used after boot. | |
78 | * If you really want the space back, just remap it yourself. | |
79 | * boot_ioremap(&boot_ioremap_space-PAGE_OFFSET, BOOT_IOREMAP_SIZE) | |
80 | */ | |
81 | __init void* boot_ioremap(unsigned long phys_addr, unsigned long size) | |
82 | { | |
83 | unsigned long last_addr, offset; | |
84 | unsigned int nrpages; | |
85 | ||
86 | last_addr = phys_addr + size - 1; | |
87 | ||
88 | /* page align the requested address */ | |
89 | offset = phys_addr & ~PAGE_MASK; | |
90 | phys_addr &= PAGE_MASK; | |
91 | size = PAGE_ALIGN(last_addr) - phys_addr; | |
92 | ||
93 | nrpages = size >> PAGE_SHIFT; | |
94 | if (nrpages > BOOT_IOREMAP_PAGES) | |
95 | return NULL; | |
96 | ||
97 | __boot_ioremap(phys_addr, nrpages, boot_ioremap_space); | |
98 | ||
99 | return &boot_ioremap_space[offset]; | |
100 | } |