/*
 * linux/boot/head.S
 *
 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
 */

/*
 * head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>

	__HEAD
	.code32
ENTRY(startup_32)
	/*
	 * The 32-bit entry point is at offset 0 and is ABI, so it is immutable!
	 * If we come here directly from a boot loader, the
	 * kernel(text+data+bss+brk), ramdisk, zero_page and command line
	 * all need to be under the 4G limit.
	 */
	cld
	/*
	 * Test the KEEP_SEGMENTS flag to see if the boot loader is asking
	 * us not to reload the segment registers.
	 */
	testb	$(1<<6), BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$(__BOOT_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
1:

	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at. This can only be done
	 * with a short local call on x86. Nothing else will tell us what
	 * address we are running at. The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation. Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp
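	/*
	 * For example, if the image was linked to run at 0 but the boot
	 * loader placed it at 0x100000, the call pushes (0x100000 + 1b)
	 * as its return address; popping that and subtracting the
	 * link-time value $1b leaves %ebp = 0x100000, the load address.
	 */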

	/* Set up a stack and make sure the CPU supports long mode. */
	movl	$boot_stack_end, %eax
	addl	%ebp, %eax
	movl	%eax, %esp

	call	verify_cpu
	testl	%eax, %eax
	jnz	no_longmode

	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and %ebx
	 * contains the address where we should move the kernel image temporarily
	 * for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
#else
	movl	$LOAD_PHYSICAL_ADDR, %ebx
#endif
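	/*
	 * In the relocatable case the sequence above rounds %ebx up to
	 * the requested alignment: ebx = (ebp + align - 1) & ~(align - 1).
	 * For example, with %ebp = 0x1234567 and a 2 MiB alignment:
	 * (0x1234567 + 0x1fffff) & ~0x1fffff = 0x1400000.
	 */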

	/* Target address to relocate to for decompression */
	addl	$z_extract_offset, %ebx

	/*
	 * Prepare for entering 64-bit mode
	 */

	/* Load new GDT with the 64-bit segments using 32-bit descriptor */
	leal	gdt(%ebp), %eax
	movl	%eax, gdt+2(%ebp)
	lgdt	gdt(%ebp)
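	/*
	 * gdt+2 is the 32-bit base field of the GDT pseudo-descriptor
	 * defined below; the store above patches it with our runtime
	 * address so lgdt sees the relocated table. The 8-byte
	 * pseudo-descriptor occupies the (never loaded) null slot and
	 * selector 0x08 is a second null, so the usable selectors are
	 * __KERNEL_CS = 0x10, __KERNEL_DS = 0x18 and the TSS at 0x20.
	 */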

	/* Enable PAE mode */
	movl	$(X86_CR4_PAE), %eax
	movl	%eax, %cr4

	/*
	 * Build early 4G boot pagetable
	 */
	/* Initialize Page tables to 0 */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$((4096*6)/4), %ecx
	rep	stosl

	/* Build Level 4 */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	%eax, 0(%edi)

	/* Build Level 3 */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
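	/*
	 * The tables above identity-map the first 4 GiB with one PML4
	 * page, one PDPT page and four PD pages (6 pages in total):
	 * 4 PDs * 512 entries * 2 MiB = 4 GiB. The low bits are entry
	 * flags: 0x1007 points at the next table at +0x1000 with
	 * USER | RW | PRESENT, and 0x183 is GLOBAL | PSE (2 MiB page) |
	 * RW | PRESENT.
	 */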

	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* With the new GDT loaded, clear the LDT and load the TSS */
	xorl	%eax, %eax
	lldt	%ax
	movl	$0x20, %eax
	ltr	%ax

	/*
	 * Setup for the jump to 64-bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32-bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64-bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
	pushl	%eax
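	/*
	 * The stack now holds the startup_64 address with the
	 * __KERNEL_CS selector beneath it; once paging is switched on
	 * below, lret pops them into %eip and %cs and, because that
	 * descriptor has CS.L = 1, execution resumes in 64-bit mode.
	 */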

	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
	movl	%eax, %cr0

	/* Jump from 32-bit compatibility mode into 64-bit mode. */
	lret
ENDPROC(startup_32)

	.code64
	.org 0x200
ENTRY(startup_64)
	/*
	 * The 64-bit entry point is at offset 0x200 and is ABI, so it is immutable!
	 * We come here either from startup_32 or directly from a
	 * 64-bit boot loader.
	 * If we come here from a boot loader, the kernel(text+data+bss+brk),
	 * ramdisk, zero_page and command line could be above 4G.
	 * We depend on an identity-mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */
#ifdef CONFIG_EFI_STUB
	/*
	 * The entry point for the PE/COFF executable is efi_pe_entry, so
	 * only legacy boot loaders will execute this jmp.
	 */
	jmp	preferred_addr

ENTRY(efi_pe_entry)
	mov	%rcx, %rdi
	mov	%rdx, %rsi
	pushq	%rdi
	pushq	%rsi
	call	make_boot_params
	cmpq	$0, %rax
	je	1f
	mov	%rax, %rdx
	popq	%rsi
	popq	%rdi

ENTRY(efi_stub_entry)
	call	efi_main
	movq	%rax, %rsi
	cmpq	$0, %rax
	jne	2f
1:
	/* EFI init failed, so hang. */
	hlt
	jmp	1b
2:
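	/*
	 * Compute the runtime address of preferred_addr: take the
	 * runtime address of label 3, strip its link-time offset and
	 * the preferred load address, then add code32_start, which
	 * efi_main points at the copy of the kernel it may have
	 * relocated.
	 */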
	call	3f
3:
	popq	%rax
	subq	$3b, %rax
	subq	BP_pref_address(%rsi), %rax
	addl	BP_code32_start(%rsi), %eax
	leaq	preferred_addr(%rax), %rax
	jmp	*%rax

preferred_addr:
#endif

	/* Setup data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs
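	/*
	 * In long mode the CPU treats the DS/ES/SS segment bases as
	 * zero regardless of the selector, so null selectors suffice
	 * here; FS and GS would get their bases via MSRs if needed.
	 */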

	/*
	 * Compute the decompressed kernel start address. It is where
	 * we were loaded at, aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to 2MB, otherwise decompress and
	 * run the kernel from LOAD_PHYSICAL_ADDR.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
#else
	movq	$LOAD_PHYSICAL_ADDR, %rbp
#endif

	/* Target address to relocate to for decompression */
	leaq	z_extract_offset(%rbp), %rbx

	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 */
	pushq	%rsi
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx
	std
	rep	movsq
	cld
	popq	%rsi
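	/*
	 * The source and destination ranges may overlap with the
	 * destination higher in memory, so the copy runs backwards:
	 * std makes rep movsq walk down from the last quadword
	 * (_bss - 8) of each buffer, and cld restores the normal
	 * direction afterwards.
	 */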

	/*
	 * Jump to the relocated address.
	 */
	leaq	relocated(%rbx), %rax
	jmp	*%rax

	.text
relocated:

	/*
	 * Clear BSS (stack is currently empty)
	 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

	/*
	 * Adjust our own GOT
	 */
	leaq	_got(%rip), %rdx
	leaq	_egot(%rip), %rcx
1:
	cmpq	%rcx, %rdx
	jae	2f
	addq	%rbx, (%rdx)
	addq	$8, %rdx
	jmp	1b
2:
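	/*
	 * Each 8-byte GOT entry holds a link-time absolute address;
	 * adding the runtime base %rbx rewrites it to point into the
	 * relocated image.
	 */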

	/*
	 * Do the decompression, and jump to the new kernel.
	 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for decompression */
	leaq	input_data(%rip), %rdx	/* input_data */
	movl	$z_input_len, %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	call	decompress_kernel
	popq	%rsi

	/*
	 * Jump to the decompressed kernel.
	 */
	jmp	*%rbp

	.code32
no_longmode:
	/* This isn't an x86-64 CPU, so hang. */
1:
	hlt
	jmp	1b

#include "../../kernel/verify_cpu.S"

	.data
gdt:
	.word	gdt_end - gdt
	.long	gdt
	.word	0
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
gdt_end:
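/*
 * Decoding the descriptors above: 0x00af9a000000ffff is a code
 * segment with base 0, a 4 GiB limit (G=1), access type 0x9a
 * (present, DPL 0, execute/read) and L=1 for 64-bit mode;
 * 0x00cf92000000ffff is the matching read/write data segment (type
 * 0x92, D/B=1); 0x0080890000000000 is a minimal available 64-bit
 * TSS (type 0x89) with base 0, just enough to satisfy ltr.
 */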

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","a",@nobits
	.balign 4096
pgtable:
	.fill 6*4096, 1, 0