/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L2_ATTR (_PAGE_PRESENT)

#define LEVEL0_SIZE (1UL << 12UL)

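/*
 * A note on the attribute macros above: the _PAGE_* constants are the
 * standard ia32 page table entry bits (present, writable, accessed,
 * dirty).  Marking the entries accessed and dirty up front presumably
 * spares the hardware from ever writing those bits back, and L2_ATTR
 * carries only _PAGE_PRESENT because in PAE mode the level-3 (PDPT)
 * entries treat most of the other bits as reserved.  LEVEL0_SIZE is
 * simply the 4 KiB page size.
 */
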
#ifndef CONFIG_X86_PAE
#define LEVEL1_SIZE (1UL << 22UL)
static u32 pgtable_level1[1024] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
        unsigned long level1_index, level2_index;
        u32 *pgtable_level2;

        /* Find the current page table */
        pgtable_level2 = __va(read_cr3());

        /* Find the indexes of the physical address to identity map */
        level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
        level2_index = address / LEVEL1_SIZE;

        /* Identity map the page table entry */
        pgtable_level1[level1_index] = address | L0_ATTR;
        pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;

        /* Flush the tlb so the new mapping takes effect.
         * Global tlb entries are not flushed but that is not an issue.
         */
        load_cr3(pgtable_level2);
}
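
/*
 * A worked example of the index arithmetic above, with a made-up
 * address: each of the 1024 4-byte entries in pgtable_level1 maps one
 * 4 KiB page, so a level-1 table spans 4 MiB, which is where
 * LEVEL1_SIZE (1UL << 22) comes from.  Identity mapping 0x00c01000
 * gives
 *
 *      level2_index = 0x00c01000 / 0x400000 = 3;
 *      level1_index = (0x00c01000 % 0x400000) / 0x1000 = 1;
 *
 * so slot 3 of the live page directory is pointed at pgtable_level1
 * and slot 1 of pgtable_level1 maps the page onto itself.
 */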

#else
#define LEVEL1_SIZE (1UL << 21UL)
#define LEVEL2_SIZE (1UL << 30UL)
static u64 pgtable_level1[512] PAGE_ALIGNED;
static u64 pgtable_level2[512] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
        unsigned long level1_index, level2_index, level3_index;
        u64 *pgtable_level3;

        /* Find the current page table */
        pgtable_level3 = __va(read_cr3());

        /* Find the indexes of the physical address to identity map */
        level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
        level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
        level3_index = address / LEVEL2_SIZE;

        /* Identity map the page table entry */
        pgtable_level1[level1_index] = address | L0_ATTR;
        pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
        set_64bit(&pgtable_level3[level3_index],
                  __pa(pgtable_level2) | L2_ATTR);

        /* Flush the tlb so the new mapping takes effect.
         * Global tlb entries are not flushed but that is not an issue.
         */
        load_cr3(pgtable_level3);
}
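
/*
 * Rough sketch of the PAE layout used above: entries are 8 bytes, so a
 * 4 KiB table holds 512 of them.  A level-1 table therefore spans
 * 512 * 4 KiB = 2 MiB (1UL << 21), a level-2 table spans
 * 512 * 2 MiB = 1 GiB (1UL << 30), and the level-3 table is the small
 * page directory pointer table reached from %cr3.  For the made-up
 * address 0x40201000:
 *
 *      level3_index = 0x40201000 / 0x40000000 = 1;
 *      level2_index = (0x40201000 % 0x40000000) / 0x200000 = 1;
 *      level1_index = (0x40201000 % 0x200000) / 0x1000 = 1;
 *
 * set_64bit() is used for the level-3 entry so that the 8-byte store is
 * atomic and the hardware never observes a half-written entry with the
 * present bit set.
 */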
#endif

static void set_idt(void *newidt, __u16 limit)
{
        struct Xgt_desc_struct curidt;

        /* ia32 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtl %0\n"
                : : "m" (curidt)
        );
};


static void set_gdt(void *newgdt, __u16 limit)
{
        struct Xgt_desc_struct curgdt;

        /* ia32 supports unaligned loads & stores */
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtl %0\n"
                : : "m" (curgdt)
        );
};
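
/*
 * For reference, Xgt_desc_struct (from the i386 descriptor headers) is
 * roughly the 6-byte pseudo-descriptor operand that lidt/lgdt expect:
 *
 *      struct Xgt_desc_struct {
 *              unsigned short size;
 *              unsigned long address __attribute__((packed));
 *      };
 *
 * The "unaligned loads & stores" remark above is about the 32-bit
 * address field sitting at offset 2; ia32 copes with that, so no extra
 * padding or staging buffer is needed before the lidtl/lgdtl.
 */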

static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

        __asm__ __volatile__ (
                "\tljmp $"STR(__KERNEL_CS)",$1f\n"
                "\t1:\n"
                "\tmovl $"STR(__KERNEL_DS)",%eax\n"
                "\tmovl %eax,%ds\n"
                "\tmovl %eax,%es\n"
                "\tmovl %eax,%fs\n"
                "\tmovl %eax,%gs\n"
                "\tmovl %eax,%ss\n"
        );
#undef STR
#undef __STR
}
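
/*
 * Note on the stringify helpers above: STR(__KERNEL_CS) expands the
 * macro first and then quotes the result, so if __KERNEL_CS expanded
 * to, say, 0x60 the first instruction would read "ljmp $0x60,$1f".
 * The far jump is what actually reloads %cs (mov cannot write to it);
 * the moves that follow refresh every data segment register from the
 * current gdt so that nothing stale is referenced once the gdt is
 * zapped later in machine_kexec().
 */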

typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
                                        unsigned long indirection_page,
                                        unsigned long reboot_code_buffer,
                                        unsigned long start_address,
                                        unsigned int has_pae) ATTRIB_NORET;

const extern unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
const extern unsigned int relocate_new_kernel_size;
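
/*
 * relocate_new_kernel is the position-independent relocation stub,
 * presumably assembled from the accompanying relocate_kernel.S.  The
 * start/end/size symbols exist so machine_kexec() can memcpy the stub
 * into the identity-mapped control page and run it from there.
 */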

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
        return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
        unsigned long page_list;
        unsigned long reboot_code_buffer;

        relocate_new_kernel_t rnk;

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        /* Compute some offsets */
        reboot_code_buffer = page_to_pfn(image->control_code_page)
                                                        << PAGE_SHIFT;
        page_list = image->head;

        /* Set up an identity mapping for the reboot_code_buffer */
        identity_map_page(reboot_code_buffer);

        /* copy it out */
        memcpy((void *)reboot_code_buffer, relocate_new_kernel,
                                                relocate_new_kernel_size);

        /* The segment registers are funny things; they are
         * automatically loaded from a table in memory whenever you
         * set them to a specific selector, but that table is never
         * accessed again until you set the segment to a different
         * selector.
         *
         * The closer model is a cache: the behind-the-scenes work
         * happens at load time, but the cached state can also be
         * dropped at arbitrary times.
         *
         * I take advantage of this here by force loading the
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
        /* The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

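        /* For orientation: page_list is the head of the kexec indirection
         * list describing where each segment must finally be copied,
         * reboot_code_buffer is the physical address the stub was copied
         * to (and identity mapped above), image->start is the entry point
         * of the new kernel, and cpu_has_pae is forwarded as the has_pae
         * flag declared in relocate_new_kernel_t.  The call below does
         * not return, which is why the type carries ATTRIB_NORET.
         */
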
        /* now call it */
        rnk = (relocate_new_kernel_t) reboot_code_buffer;
        (*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
}