# NOTE(review): a stray "]>" preceded this table header — it and the
# "Commit | Line | Data" columns below are git-blame/extraction residue,
# not assembler input; the pristine source must be recovered from the
# kernel tree before this file can be assembled.
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * | |
3 | * Trampoline.S Derived from Setup.S by Linus Torvalds | |
4 | * | |
5 | * 4 Jan 1997 Michael Chastain: changed to gnu as. | |
90b1c208 | 6 | * 15 Sept 2005 Eric Biederman: 64bit PIC support |
1da177e4 LT |
7 | * |
8 | * Entry: CS:IP point to the start of our code, we are | |
9 | * in real mode with no stack, but the rest of the | |
10 | * trampoline page to make our stack and everything else | |
11 | * is a mystery. | |
12 | * | |
13 | * In fact we don't actually need a stack so we don't | |
14 | * set one up. | |
15 | * | |
16 | * On entry to trampoline_data, the processor is in real mode | |
17 | * with 16-bit addressing and 16-bit data. CS has some value | |
18 | * and IP is zero. Thus, data addresses need to be absolute | |
19 | * (no relocation) and are taken with regard to r_base. | |
20 | * | |
90b1c208 VG |
21 | * With the addition of trampoline_level4_pgt this code can |
22 | * now enter a 64bit kernel that lives at arbitrary 64bit | |
23 | * physical addresses. | |
24 | * | |
1da177e4 LT |
25 | * If you work on this file, check the object module with objdump |
26 | * --full-contents --reloc to make sure there are no relocation | |
90b1c208 | 27 | * entries. |
1da177e4 LT |
28 | */ |
29 | ||
30 | #include <linux/linkage.h> | |
90b1c208 | 31 | #include <asm/pgtable.h> |
1da177e4 | 32 | #include <asm/page.h> |
90b1c208 VG |
33 | #include <asm/msr.h> |
34 | #include <asm/segment.h> | |
1da177e4 LT |
35 | |
36 | .data | |
37 | ||
38 | .code16 | |
39 | ||
# NOTE(review): everything below is a git-blame-annotated rendering of the
# x86_64 SMP trampoline (commit hashes, line-number columns and trailing
# pipes are extraction residue, not part of the code). The annotated lines
# are left byte-for-byte untouched; only reviewer comments are added.
#
# What the visible code does: a secondary CPU enters at trampoline_data in
# 16-bit real mode with IP = 0 and CS pointing at the trampoline page. It
# writes a 0xA5A5A5A5 marker over trampoline_data so the boot CPU can see
# it is alive, sets up a small stack, calls verify_cpu to confirm long-mode
# support (halting at no_longmode otherwise), patches the far-jump vectors
# and the GDT base with the runtime physical base (%esi = CS << 4), loads
# tidt/tgdt, sets CR0.PE via lmsw, then in 32-bit protected mode enables
# PAE, loads trampoline_level4_pgt into CR3, sets EFER.LME, turns on
# paging, and finally far-jumps into 64-bit mode and on to
# secondary_startup_64 through %rax. All data references are taken
# relative to r_base because the code runs unrelocated at CS:0.
40 | ENTRY(trampoline_data) | |
41 | r_base = . | |
90b1c208 | 42 | cli # We should be safe anyway |
1da177e4 LT |
43 | wbinvd |
44 | mov %cs, %ax # Code and data in the same place | |
45 | mov %ax, %ds |
90b1c208 VG |
46 | mov %ax, %es |
47 | mov %ax, %ss | |
1da177e4 | 48 | |
1da177e4 LT |
49 | |
50 | movl $0xA5A5A5A5, trampoline_data - r_base | |
51 | # write marker for master knows we're running | |
52 | ||
90b1c208 VG |
53 | # Setup stack |
54 | movw $(trampoline_stack_end - r_base), %sp | |
55 | ||
56 | call verify_cpu # Verify the cpu supports long mode | |
a4831e08 VG |
57 | testl %eax, %eax # Check for return code |
58 | jnz no_longmode | |
90b1c208 VG |
59 | |
60 | mov %cs, %ax |
61 | movzx %ax, %esi # Find the 32bit trampoline location | |
62 | shll $4, %esi | |
63 | ||
64 | # Fixup the vectors | |
65 | addl %esi, startup_32_vector - r_base | |
66 | addl %esi, startup_64_vector - r_base | |
67 | addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer | |
68 | ||
983d5dbd VG |
69 | /* |
70 | * GDT tables in non default location kernel can be beyond 16MB and | |
71 | * lgdt will not be able to load the address as in real mode default | |
72 | * operand size is 16bit. Use lgdtl instead to force operand size | |
73 | * to 32 bit. | |
74 | */ | |
75 | ||
90b1c208 VG |
76 | lidtl tidt - r_base # load idt with 0, 0 |
77 | lgdtl tgdt - r_base # load gdt with whatever is appropriate | |
1da177e4 LT |
78 | |
79 | xor %ax, %ax | |
80 | inc %ax # protected mode (PE) bit | |
81 | lmsw %ax # into protected mode | |
90b1c208 VG |
82 | |
83 | # flush prefetch and jump to startup_32 | |
84 | ljmpl *(startup_32_vector - r_base) | |
85 | ||
# --- 32-bit protected-mode stage: PAE on, trampoline page tables into CR3,
# EFER.LME set, then CR0.PG|CR0.PE to activate long (compatibility) mode ---
86 | .code32 | |
87 | .balign 4 | |
88 | startup_32: | |
89 | movl $__KERNEL_DS, %eax # Initialize the %ds segment register | |
90 | movl %eax, %ds | |
91 | ||
92 | xorl %eax, %eax | |
93 | btsl $5, %eax # Enable PAE mode | |
94 | movl %eax, %cr4 | |
95 | ||
96 | # Setup trampoline 4 level pagetables | |
97 | leal (trampoline_level4_pgt - r_base)(%esi), %eax | |
98 | movl %eax, %cr3 | |
99 | ||
100 | movl $MSR_EFER, %ecx | |
101 | movl $(1 << _EFER_LME), %eax # Enable Long Mode | |
102 | xorl %edx, %edx | |
103 | wrmsr | |
104 | ||
105 | xorl %eax, %eax | |
106 | btsl $31, %eax # Enable paging and in turn activate Long Mode | |
107 | btsl $0, %eax # Enable protected mode | |
108 | movl %eax, %cr0 | |
109 | ||
110 | /* | |
111 | * At this point we're in long mode but in 32bit compatibility mode | |
112 | * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn | |
113 | * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use | |
114 | * the new gdt/idt that has __KERNEL_CS with CS.L = 1. | |
115 | */ | |
116 | ljmp *(startup_64_vector - r_base)(%esi) | |
117 | ||
118 | .code64 | |
119 | .balign 4 | |
120 | startup_64: | |
121 | # Now jump into the kernel using virtual addresses | |
122 | movq $secondary_startup_64, %rax | |
123 | jmp *%rax | |
124 | ||
125 | .code16 | |
90b1c208 VG |
126 | no_longmode: |
127 | hlt | |
128 | jmp no_longmode | |
e0a84f68 | 129 | #include "verify_cpu_64.S" |
1da177e4 LT |
130 | |
131 | # Careful these need to be in the same 64K segment as the above; | |
90b1c208 | 132 | tidt: |
1da177e4 LT |
133 | .word 0 # idt limit = 0 |
134 | .word 0, 0 # idt base = 0L | |
135 | ||
90b1c208 VG |
136 | # Duplicate the global descriptor table |
137 | # so the kernel can live anywhere | |
138 | .balign 4 | |
139 | tgdt: | |
140 | .short tgdt_end - tgdt # gdt limit | |
141 | .long tgdt - r_base | |
142 | .short 0 | |
143 | .quad 0x00cf9b000000ffff # __KERNEL32_CS | |
144 | .quad 0x00af9b000000ffff # __KERNEL_CS | |
145 | .quad 0x00cf93000000ffff # __KERNEL_DS | |
146 | tgdt_end: | |
147 | ||
148 | .balign 4 | |
149 | startup_32_vector: | |
150 | .long startup_32 - r_base | |
151 | .word __KERNEL32_CS, 0 | |
152 | ||
153 | .balign 4 | |
154 | startup_64_vector: | |
155 | .long startup_64 - r_base | |
156 | .word __KERNEL_CS, 0 | |
157 | ||
# The real-mode stack grows down from trampoline_stack_end; .org 0x1000
# pads the block out so the stack top sits at offset 0x1000, matching the
# "movw $(trampoline_stack_end - r_base), %sp" setup above.
158 | trampoline_stack: | |
159 | .org 0x1000 | |
160 | trampoline_stack_end: | |
161 | ENTRY(trampoline_level4_pgt) | |
162 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | |
163 | .fill 510,8,0 | |
164 | .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE | |
1da177e4 | 165 | |
90b1c208 | 166 | ENTRY(trampoline_end)