#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

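/*
 * Default size of the vmalloc area: 128 MB.  It can be overridden at
 * boot with the "vmalloc=" parameter handled by parse_vmalloc() below.
 */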
unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d)) {
		BUG();
		return;
	}
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
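
/*
 * Illustrative use (not part of this file): the fixmap code installs
 * kernel mappings through this helper, roughly along the lines of
 *
 *	set_pte_vaddr(fix_to_virt(idx), pfn_pte(phys >> PAGE_SHIFT, flags));
 *
 * A pte value for which pte_none() is true clears the mapping instead
 * of installing one.
 */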
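
/*
 * Virtual address of the top of the fixmap area (FIXADDR_TOP).  It can
 * be lowered at boot by reserve_top_address(), e.g. via the
 * "reservetop=" parameter handled below.
 */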
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

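/*
 * Example (illustrative): booting with "vmalloc=192M" makes memparse()
 * return 192 MB here, so the reserved area becomes 192 MB plus
 * VMALLOC_OFFSET for the guard hole.
 */
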
/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	early_ioremap_init();
	return 0;
}
early_param("reservetop", parse_reservetop);
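
/*
 * Example (illustrative): booting with "reservetop=16M" keeps roughly the
 * top 16 MB of the kernel address space free by moving the fixmap (and
 * thus __FIXADDR_TOP) down before paging is fully initialized.
 */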