/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define p4d_index(x)	(((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

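/*
 * Illustrative values (editor's sketch, assuming 4-level paging where
 * pgd_index() shifts by 39 and masks with 511):
 *	pgd_index(0xffffffff80000000) = 511
 *	pud_index(0xffffffff80000000) = 510
 * i.e. __START_KERNEL_map lands in the last PGD slot and the
 * second-to-last PUD slot, matching the 511/510 comments further down.
 */
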
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi
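	/*
	 * (%rsi still holds the real_mode_data pointer and __startup_64 is
	 * C code; %rsi is caller-clobbered in the SysV ABI, so the push/pop
	 * above preserves it across the call.)
	 */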

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	orl	$X86_CR4_LA57, %ecx
#endif
	movq	%rcx, %cr4
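	/*
	 * (CR4.PAE is mandatory for long-mode paging and CR4.LA57 selects
	 * 5-level translation, so both must be in place before the CR3
	 * load below.)
	 */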

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
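	/*
	 * (The "movq $1f, %rax" above materializes the link-time, i.e.
	 * virtual, address of the 1: label; a plain RIP-relative "jmp 1f"
	 * would keep executing on the identity mapping, so the indirect
	 * jump is what moves %rip into the kernel's virtual mapping.)
	 */
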
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
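	/*
	 * (Bit 20 of CPUID 0x80000001 %edx, tested above, is the NX feature
	 * flag; EFER.NX and the NX bit in early_pmd_flags are only set when
	 * the CPU actually supports no-execute mappings.)
	 */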

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
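	/*
	 * (WRMSR takes the 64-bit MSR value split across %edx:%eax, which
	 * is why initial_gs is loaded as two 32-bit halves.)
	 */
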
	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
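	/*
	 * Stack layout at the lretq below, top of stack first:
	 *	initial_code	-> becomes %rip
	 *	__KERNEL_CS	-> becomes %cs
	 *	.Lafter_lret	-> fake return address for the unwinder
	 */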
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
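	/*
	 * (Each stub above is padded with 0xcc (int3) to a fixed
	 * EARLY_IDT_HANDLER_SIZE stride, so the early IDT can be populated
	 * by address arithmetic: vector i's handler is at
	 * early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE.)
	 */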

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)
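	/*
	 * (Vector 14 is #PF: early page faults are first offered to
	 * early_make_pgtable, which builds the missing identity mapping and
	 * returns zero on success; any other vector, or a failed fixup,
	 * goes through early_fixup_exception instead.)
	 */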

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

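/*
 * Illustrative expansion (editor's sketch): with PMD_SHIFT == 21
 * (2MB pages), PMDS(0, perm, 3) assembles to
 *	.quad 0x000000 + perm
 *	.quad 0x200000 + perm
 *	.quad 0x400000 + perm
 * i.e. three consecutive 2MB identity-mapping entries.
 */
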
	__INITDATA
NEXT_PAGE(early_top_pgt)
	.fill	511,8,0
#ifdef CONFIG_X86_5LEVEL
	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
#else
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
#endif
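	/*
	 * (The "- __START_KERNEL_map" turns the link-time virtual address of
	 * the next-level table into the physical address it would have if
	 * the kernel ran where it was compiled to run; __startup_64 then
	 * fixes these entries up by the actual load offset.)
	 */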

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_top_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_top_pgt + PGD_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_top_pgt + PGD_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
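	/*
	 * (With the 512MB image size noted above and 2MB PMD entries, this
	 * PMDS() invocation emits 512M/2M = 256 entries, filling only half
	 * of the 512-slot page.)
	 */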

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)
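	/*
	 * (This is the descriptor operand lgdt expects: a 16-bit limit,
	 * GDT_ENTRIES*8-1, followed by the 64-bit linear base address.)
	 */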

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

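/*
 * (phys_base ends up holding the delta between the physical address the
 * kernel was compiled to run at and where it was actually loaded; it is
 * the value added via "addq phys_base(%rip), %rax" before the page tables
 * are loaded into %cr3 above.)
 */
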
#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)